Merge drm/drm-next into drm-misc-next
[linux-2.6-block.git] / drivers / gpu / drm / i915 / intel_display.c
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Eric Anholt <eric@anholt.net>
25  */
26
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include "intel_frontbuffer.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
40 #include "i915_gem_clflush.h"
41 #include "intel_dsi.h"
42 #include "i915_trace.h"
43 #include <drm/drm_atomic.h>
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_dp_helper.h>
46 #include <drm/drm_crtc_helper.h>
47 #include <drm/drm_plane_helper.h>
48 #include <drm/drm_rect.h>
49 #include <linux/dma_remapping.h>
50 #include <linux/reservation.h>
51
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/*
 * Framebuffer modifiers supported on pre-SKL planes. Modifier lists
 * are terminated by DRM_FORMAT_MOD_INVALID.
 */
static const uint64_t i9xx_format_modifiers[] = {
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Primary plane formats for SKL+ (packed RGB and YUV formats) */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Same as skl_primary_formats[] plus the NV12 planar format */
static const uint32_t skl_pri_planar_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_NV12,
};

/* SKL+ modifiers when the plane does not support render compression (CCS) */
static const uint64_t skl_format_modifiers_noccs[] = {
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* SKL+ modifiers including the CCS (render compression) variants */
static const uint64_t skl_format_modifiers_ccs[] = {
	I915_FORMAT_MOD_Yf_TILED_CCS,
	I915_FORMAT_MOD_Y_TILED_CCS,
	I915_FORMAT_MOD_Yf_TILED,
	I915_FORMAT_MOD_Y_TILED,
	I915_FORMAT_MOD_X_TILED,
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Cursor planes only support linear buffers */
static const uint64_t cursor_format_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};
134
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
				    struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
166
/*
 * Legal ranges for the DPLL divider values of a given platform/output
 * combination. The ranges are validated by intel_PLL_is_valid() and
 * searched by the *_find_best_dpll() helpers.
 */
struct intel_limit {
	struct {
		int min, max;	/* inclusive [min, max] range */
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;		/* dot clock threshold: below -> slow, at/above -> fast */
		int p2_slow, p2_fast;	/* p2 post divider candidates */
	} p2;
};
177
178 /* returns HPLL frequency in kHz */
179 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
180 {
181         int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
182
183         /* Obtain SKU information */
184         mutex_lock(&dev_priv->sb_lock);
185         hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
186                 CCK_FUSE_HPLL_FREQ_MASK;
187         mutex_unlock(&dev_priv->sb_lock);
188
189         return vco_freq[hpll_freq] * 1000;
190 }
191
192 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
193                       const char *name, u32 reg, int ref_freq)
194 {
195         u32 val;
196         int divider;
197
198         mutex_lock(&dev_priv->sb_lock);
199         val = vlv_cck_read(dev_priv, reg);
200         mutex_unlock(&dev_priv->sb_lock);
201
202         divider = val & CCK_FREQUENCY_VALUES;
203
204         WARN((val & CCK_FREQUENCY_STATUS) !=
205              (divider << CCK_FREQUENCY_STATUS_SHIFT),
206              "%s change in progress\n", name);
207
208         return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
209 }
210
211 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
212                            const char *name, u32 reg)
213 {
214         if (dev_priv->hpll_freq == 0)
215                 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
216
217         return vlv_get_cck_clock(dev_priv, name, reg,
218                                  dev_priv->hpll_freq);
219 }
220
221 static void intel_update_czclk(struct drm_i915_private *dev_priv)
222 {
223         if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
224                 return;
225
226         dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
227                                                       CCK_CZ_CLOCK_CONTROL);
228
229         DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
230 }
231
232 static inline u32 /* units of 100MHz */
233 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
234                     const struct intel_crtc_state *pipe_config)
235 {
236         if (HAS_DDI(dev_priv))
237                 return pipe_config->port_clock; /* SPLL */
238         else
239                 return dev_priv->fdi_pll_freq;
240 }
241
/*
 * Per-platform / per-output DPLL divider limit tables. All clock values
 * are in kHz unless noted otherwise. These are consumed by the
 * *_find_best_dpll() search helpers via intel_PLL_is_valid().
 */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	/* dot_limit = 0: p2 is effectively fixed (slow == fast) */
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored with 22 fractional bits on CHV */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
506
static void
skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
	/*
	 * Toggle the FBC queue disable chicken bit for the given pipe.
	 * SKL and BXT are excluded, so this only takes effect on the
	 * later gen9/gen10 derivatives handled by this code path.
	 */
	if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
		return;

	if (enable)
		I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
	else
		/*
		 * NOTE(review): both branches write the full register rather
		 * than read-modify-write, so any other chicken bits in
		 * CHICKEN_PIPESL_1 are cleared here — confirm that is
		 * intentional for the platforms this runs on.
		 */
		I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
}
518
519 static void
520 skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
521 {
522         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
523                 return;
524
525         if (enable)
526                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
527                            DUPS1_GATING_DIS | DUPS2_GATING_DIS);
528         else
529                 I915_WRITE(CLKGATE_DIS_PSL(pipe),
530                            I915_READ(CLKGATE_DIS_PSL(pipe)) &
531                            ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
532 }
533
534 static bool
535 needs_modeset(const struct drm_crtc_state *state)
536 {
537         return drm_atomic_crtc_needs_modeset(state);
538 }
539
540 /*
541  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
542  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
543  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
544  * The helpers' return value is the rate of the clock that is fed to the
545  * display engine's pipe which can be the above fast dot clock rate or a
546  * divided-down version of it.
547  */
548 /* m1 is reserved as 0 in Pineview, n is a ring counter */
549 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
550 {
551         clock->m = clock->m2 + 2;
552         clock->p = clock->p1 * clock->p2;
553         if (WARN_ON(clock->n == 0 || clock->p == 0))
554                 return 0;
555         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
556         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
557
558         return clock->dot;
559 }
560
561 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
562 {
563         return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
564 }
565
566 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
567 {
568         clock->m = i9xx_dpll_compute_m(clock);
569         clock->p = clock->p1 * clock->p2;
570         if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
571                 return 0;
572         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
573         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
574
575         return clock->dot;
576 }
577
578 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
579 {
580         clock->m = clock->m1 * clock->m2;
581         clock->p = clock->p1 * clock->p2;
582         if (WARN_ON(clock->n == 0 || clock->p == 0))
583                 return 0;
584         clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
585         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
586
587         return clock->dot / 5;
588 }
589
590 int chv_calc_dpll_params(int refclk, struct dpll *clock)
591 {
592         clock->m = clock->m1 * clock->m2;
593         clock->p = clock->p1 * clock->p2;
594         if (WARN_ON(clock->n == 0 || clock->p == 0))
595                 return 0;
596         clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
597                         clock->n << 22);
598         clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
599
600         return clock->dot / 5;
601 }
602
/*
 * Reject the candidate divisors with "false"; the debug print is
 * compiled out. NOTE the hidden "return" — this macro is only meant
 * to be used inside intel_PLL_is_valid().
 */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)

/*
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* The m1 > m2 requirement does not apply to PNV/VLV/CHV/GEN9-LP PLLs. */
	if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/GEN9-LP limit tables define no combined m/p ranges. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !IS_GEN9_LP(dev_priv)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
645
646 static int
647 i9xx_select_p2_div(const struct intel_limit *limit,
648                    const struct intel_crtc_state *crtc_state,
649                    int target)
650 {
651         struct drm_device *dev = crtc_state->base.crtc->dev;
652
653         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
654                 /*
655                  * For LVDS just rely on its current settings for dual-channel.
656                  * We haven't figured out how to reliably set up different
657                  * single/dual channel state, if we even can.
658                  */
659                 if (intel_is_dual_link_lvds(dev))
660                         return limit->p2.p2_fast;
661                 else
662                         return limit->p2.p2_slow;
663         } else {
664                 if (target < limit->p2.dot_limit)
665                         return limit->p2.p2_slow;
666                 else
667                         return limit->p2.p2_fast;
668         }
669 }
670
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* Best deviation from target seen so far; start at target itself. */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustively walk the m1/m2/n/p1 space keeping the closest match. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* intel_PLL_is_valid() requires m1 > m2 here; since
			 * m2 only grows, stop this inner walk early. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff at least one valid candidate beat the initial error. */
	return (err != target);
}
728
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	/* Best deviation from target seen so far; start at target itself. */
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive walk as i9xx_find_best_dpll(), but without the
	 * m2 >= m1 early break: Pineview has a single combined m divider
	 * (treated as m2) so that constraint does not apply.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff at least one valid candidate beat the initial error. */
	return (err != target);
}
784
785 /*
786  * Returns a set of divisors for the desired target clock with the given
787  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
789  *
790  * Target and reference clocks are specified in kHz.
791  *
792  * If match_clock is provided, then best_clock P divider must match the P
793  * divider from @match_clock used for LVDS downclocking.
794  */
795 static bool
796 g4x_find_best_dpll(const struct intel_limit *limit,
797                    struct intel_crtc_state *crtc_state,
798                    int target, int refclk, struct dpll *match_clock,
799                    struct dpll *best_clock)
800 {
801         struct drm_device *dev = crtc_state->base.crtc->dev;
802         struct dpll clock;
803         int max_n;
804         bool found = false;
805         /* approximately equals target * 0.00585 */
806         int err_most = (target >> 8) + (target >> 9);
807
808         memset(best_clock, 0, sizeof(*best_clock));
809
810         clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
811
812         max_n = limit->n.max;
813         /* based on hardware requirement, prefer smaller n to precision */
814         for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
815                 /* based on hardware requirement, prefere larger m1,m2 */
816                 for (clock.m1 = limit->m1.max;
817                      clock.m1 >= limit->m1.min; clock.m1--) {
818                         for (clock.m2 = limit->m2.max;
819                              clock.m2 >= limit->m2.min; clock.m2--) {
820                                 for (clock.p1 = limit->p1.max;
821                                      clock.p1 >= limit->p1.min; clock.p1--) {
822                                         int this_err;
823
824                                         i9xx_calc_dpll_params(refclk, &clock);
825                                         if (!intel_PLL_is_valid(to_i915(dev),
826                                                                 limit,
827                                                                 &clock))
828                                                 continue;
829
830                                         this_err = abs(clock.dot - target);
831                                         if (this_err < err_most) {
832                                                 *best_clock = clock;
833                                                 err_most = this_err;
834                                                 max_n = clock.n;
835                                                 found = true;
836                                         }
837                                 }
838                         }
839                 }
840         }
841         return found;
842 }
843
844 /*
845  * Check if the calculated PLL configuration is more optimal compared to the
846  * best configuration and error found so far. Return the calculated error.
847  */
848 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
849                                const struct dpll *calculated_clock,
850                                const struct dpll *best_clock,
851                                unsigned int best_error_ppm,
852                                unsigned int *error_ppm)
853 {
854         /*
855          * For CHV ignore the error and consider only the P value.
856          * Prefer a bigger P value based on HW requirements.
857          */
858         if (IS_CHERRYVIEW(to_i915(dev))) {
859                 *error_ppm = 0;
860
861                 return calculated_clock->p > best_clock->p;
862         }
863
864         if (WARN_ON_ONCE(!target_freq))
865                 return false;
866
867         *error_ppm = div_u64(1000000ULL *
868                                 abs(target_freq - calculated_clock->dot),
869                              target_freq);
870         /*
871          * Prefer a better P value over a better (smaller) error if the error
872          * is small. Ensure this preference for future configurations too by
873          * setting the error to 0.
874          */
875         if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
876                 *error_ppm = 0;
877
878                 return true;
879         }
880
881         return *error_ppm + 10 < best_error_ppm;
882 }
883
884 /*
885  * Returns a set of divisors for the desired target clock with the given
886  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
888  */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	/* NOTE(review): match_clock is not used by this variant. */
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	/* worst acceptable error; shrinks as better candidates are found */
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			/* p2 steps down by 2 while above 10, then by 1 */
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve for m2 given the other dividers */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					/* keep the candidate only if it beats the best so far */
					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
943
944 /*
945  * Returns a set of divisors for the desired target clock with the given
946  * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
948  */
949 static bool
950 chv_find_best_dpll(const struct intel_limit *limit,
951                    struct intel_crtc_state *crtc_state,
952                    int target, int refclk, struct dpll *match_clock,
953                    struct dpll *best_clock)
954 {
955         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
956         struct drm_device *dev = crtc->base.dev;
957         unsigned int best_error_ppm;
958         struct dpll clock;
959         uint64_t m2;
960         int found = false;
961
962         memset(best_clock, 0, sizeof(*best_clock));
963         best_error_ppm = 1000000;
964
965         /*
966          * Based on hardware doc, the n always set to 1, and m1 always
967          * set to 2.  If requires to support 200Mhz refclk, we need to
968          * revisit this because n may not 1 anymore.
969          */
970         clock.n = 1, clock.m1 = 2;
971         target *= 5;    /* fast clock */
972
973         for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
974                 for (clock.p2 = limit->p2.p2_fast;
975                                 clock.p2 >= limit->p2.p2_slow;
976                                 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
977                         unsigned int error_ppm;
978
979                         clock.p = clock.p1 * clock.p2;
980
981                         m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
982                                         clock.n) << 22, refclk * clock.m1);
983
984                         if (m2 > INT_MAX/clock.m1)
985                                 continue;
986
987                         clock.m2 = m2;
988
989                         chv_calc_dpll_params(refclk, &clock);
990
991                         if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
992                                 continue;
993
994                         if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
995                                                 best_error_ppm, &error_ppm))
996                                 continue;
997
998                         *best_clock = clock;
999                         best_error_ppm = error_ppm;
1000                         found = true;
1001                 }
1002         }
1003
1004         return found;
1005 }
1006
1007 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1008                         struct dpll *best_clock)
1009 {
1010         int refclk = 100000;
1011         const struct intel_limit *limit = &intel_limits_bxt;
1012
1013         return chv_find_best_dpll(limit, crtc_state,
1014                                   target_clock, refclk, NULL, best_clock);
1015 }
1016
1017 bool intel_crtc_active(struct intel_crtc *crtc)
1018 {
1019         /* Be paranoid as we can arrive here with only partial
1020          * state retrieved from the hardware during setup.
1021          *
1022          * We can ditch the adjusted_mode.crtc_clock check as soon
1023          * as Haswell has gained clock readout/fastboot support.
1024          *
1025          * We can ditch the crtc->primary->state->fb check as soon as we can
1026          * properly reconstruct framebuffers.
1027          *
1028          * FIXME: The intel_crtc->active here should be switched to
1029          * crtc->state->active once we have proper CRTC states wired up
1030          * for atomic.
1031          */
1032         return crtc->active && crtc->base.primary->state->fb &&
1033                 crtc->config->base.adjusted_mode.crtc_clock;
1034 }
1035
1036 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1037                                              enum pipe pipe)
1038 {
1039         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1040
1041         return crtc->config->cpu_transcoder;
1042 }
1043
1044 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1045                                     enum pipe pipe)
1046 {
1047         i915_reg_t reg = PIPEDSL(pipe);
1048         u32 line1, line2;
1049         u32 line_mask;
1050
1051         if (IS_GEN2(dev_priv))
1052                 line_mask = DSL_LINEMASK_GEN2;
1053         else
1054                 line_mask = DSL_LINEMASK_GEN3;
1055
1056         line1 = I915_READ(reg) & line_mask;
1057         msleep(5);
1058         line2 = I915_READ(reg) & line_mask;
1059
1060         return line1 != line2;
1061 }
1062
/*
 * Wait (100 timeout units — presumably ms; confirm against wait_for())
 * until the pipe scanline is moving (@state == true) or stopped
 * (@state == false), logging an error on timeout.
 */
static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		DRM_ERROR("pipe %c scanline %s wait timed out\n",
			  pipe_name(pipe), onoff(state));
}
1073
/* Wait for the pipe's scanline counter to stop advancing (pipe off). */
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}
1078
/* Wait for the pipe's scanline counter to start advancing (pipe on). */
static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}
1083
/*
 * Wait for the pipe of @old_crtc_state to actually shut down after it
 * has been disabled.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (INTEL_GEN(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/*
		 * Pre-gen4 apparently has no pipe-state bit to poll
		 * (note the I965_ prefix above); fall back to watching
		 * the scanline counter instead.
		 */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
1103
1104 /* Only for pre-ILK configs */
1105 void assert_pll(struct drm_i915_private *dev_priv,
1106                 enum pipe pipe, bool state)
1107 {
1108         u32 val;
1109         bool cur_state;
1110
1111         val = I915_READ(DPLL(pipe));
1112         cur_state = !!(val & DPLL_VCO_ENABLE);
1113         I915_STATE_WARN(cur_state != state,
1114              "PLL state assertion failure (expected %s, current %s)\n",
1115                         onoff(state), onoff(cur_state));
1116 }
1117
/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* The CCK register read goes over sideband; serialize via sb_lock. */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1133
1134 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1135                           enum pipe pipe, bool state)
1136 {
1137         bool cur_state;
1138         enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1139                                                                       pipe);
1140
1141         if (HAS_DDI(dev_priv)) {
1142                 /* DDI does not have a specific FDI_TX register */
1143                 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1144                 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1145         } else {
1146                 u32 val = I915_READ(FDI_TX_CTL(pipe));
1147                 cur_state = !!(val & FDI_TX_ENABLE);
1148         }
1149         I915_STATE_WARN(cur_state != state,
1150              "FDI TX state assertion failure (expected %s, current %s)\n",
1151                         onoff(state), onoff(cur_state));
1152 }
1153 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1154 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1155
1156 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1157                           enum pipe pipe, bool state)
1158 {
1159         u32 val;
1160         bool cur_state;
1161
1162         val = I915_READ(FDI_RX_CTL(pipe));
1163         cur_state = !!(val & FDI_RX_ENABLE);
1164         I915_STATE_WARN(cur_state != state,
1165              "FDI RX state assertion failure (expected %s, current %s)\n",
1166                         onoff(state), onoff(cur_state));
1167 }
1168 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1169 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1170
1171 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1172                                       enum pipe pipe)
1173 {
1174         u32 val;
1175
1176         /* ILK FDI PLL is always enabled */
1177         if (IS_GEN5(dev_priv))
1178                 return;
1179
1180         /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1181         if (HAS_DDI(dev_priv))
1182                 return;
1183
1184         val = I915_READ(FDI_TX_CTL(pipe));
1185         I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1186 }
1187
1188 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1189                        enum pipe pipe, bool state)
1190 {
1191         u32 val;
1192         bool cur_state;
1193
1194         val = I915_READ(FDI_RX_CTL(pipe));
1195         cur_state = !!(val & FDI_RX_PLL_ENABLE);
1196         I915_STATE_WARN(cur_state != state,
1197              "FDI RX PLL assertion failure (expected %s, current %s)\n",
1198                         onoff(state), onoff(cur_state));
1199 }
1200
/*
 * Warn if the panel power sequencer registers driving @pipe are still
 * locked (writes to panel-protected registers would then be ignored).
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	/* DDI platforms are handled elsewhere; getting here is a bug. */
	if (WARN_ON(HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		/* Figure out which port the panel sequencer is wired to. */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		/* Pre-PCH-split: only an LVDS panel is expected here. */
		pp_reg = PP_CONTROL(0);
		port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

	/* Registers count as unlocked if power is off or the unlock key is set. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1257
/* Warn if the pipe's enable state differs from the expected @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/*
	 * Only read PIPECONF if its power well is up; if it isn't, the
	 * pipe is treated as disabled.
	 */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1284
1285 static void assert_plane(struct intel_plane *plane, bool state)
1286 {
1287         enum pipe pipe;
1288         bool cur_state;
1289
1290         cur_state = plane->get_hw_state(plane, &pipe);
1291
1292         I915_STATE_WARN(cur_state != state,
1293                         "%s assertion failure (expected %s, current %s)\n",
1294                         plane->base.name, onoff(state), onoff(cur_state));
1295 }
1296
1297 #define assert_plane_enabled(p) assert_plane(p, true)
1298 #define assert_plane_disabled(p) assert_plane(p, false)
1299
/* Assert that every plane attached to @crtc is disabled in hardware. */
static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}
1308
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	/*
	 * If drm_crtc_vblank_get() succeeds (returns 0), vblank interrupts
	 * were still usable on this crtc, which the WARN flags; drop the
	 * reference we just took.  NOTE(review): relies on drm_vblank core
	 * returning non-zero for a disabled crtc — confirm against drm_vblank.c.
	 */
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1314
1315 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1316                                     enum pipe pipe)
1317 {
1318         u32 val;
1319         bool enabled;
1320
1321         val = I915_READ(PCH_TRANSCONF(pipe));
1322         enabled = !!(val & TRANS_ENABLE);
1323         I915_STATE_WARN(enabled,
1324              "transcoder assertion failed, should be off on pipe %c but is still active\n",
1325              pipe_name(pipe));
1326 }
1327
1328 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1329                                    enum pipe pipe, enum port port,
1330                                    i915_reg_t dp_reg)
1331 {
1332         enum pipe port_pipe;
1333         bool state;
1334
1335         state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
1336
1337         I915_STATE_WARN(state && port_pipe == pipe,
1338                         "PCH DP %c enabled on transcoder %c, should be disabled\n",
1339                         port_name(port), pipe_name(pipe));
1340
1341         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1342                         "IBX PCH DP %c still using transcoder B\n",
1343                         port_name(port));
1344 }
1345
1346 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1347                                      enum pipe pipe, enum port port,
1348                                      i915_reg_t hdmi_reg)
1349 {
1350         enum pipe port_pipe;
1351         bool state;
1352
1353         state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1354
1355         I915_STATE_WARN(state && port_pipe == pipe,
1356                         "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1357                         port_name(port), pipe_name(pipe));
1358
1359         I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1360                         "IBX PCH HDMI %c still using transcoder B\n",
1361                         port_name(port));
1362 }
1363
/* Assert that no PCH port (DP, VGA, LVDS, HDMI) is routed to @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}
1387
/* Program the VLV DPLL and wait for it to report lock. */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	/* give the PLL time to spin up before polling for lock */
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1405
/* Enable the DPLL for @crtc on VLV, then program DPLL_MD. */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin the VCO up if the state actually wants it enabled. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1423

/* Program the CHV DPLL (incl. sideband clock enable) and wait for lock. */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	/* DPIO accesses go over sideband; serialize via sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1456
/* Enable the DPLL for @crtc on CHV, then program DPLL_MD (with workaround). */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* Only spin the VCO up if the state actually wants it enabled. */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		/* cache the value so HW readout can compare against it later */
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}
1493
1494 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
1495 {
1496         struct intel_crtc *crtc;
1497         int count = 0;
1498
1499         for_each_intel_crtc(&dev_priv->drm, crtc) {
1500                 count += crtc->base.state->active &&
1501                         intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1502         }
1503
1504         return count;
1505 }
1506
/*
 * Enable the gen2-4 DPLL for @crtc.  The exact register write/readback
 * sequence below matters; do not reorder.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc_state->dpll_hw_state.dpll;
	int i;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		/* gen4+ takes the pixel multiplier via DPLL_MD */
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc_state->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	for (i = 0; i < 3; i++) {
		I915_WRITE(reg, dpll);
		POSTING_READ(reg);
		udelay(150); /* wait for warmup */
	}
}
1566
/*
 * i9xx_disable_pll - disable the DPLL feeding @crtc's pipe
 *
 * On i830 the DVO 2x clock is first dropped from both PLLs once no
 * pipe drives a DVO output any more; the DPLL itself is then left
 * running, since i830 keeps both pipes (and their PLLs) enabled.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) &&
	    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev_priv)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if (IS_I830(dev_priv))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Leave only VGA-mode-disable set in the cleared DPLL */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1592
1593 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1594 {
1595         u32 val;
1596
1597         /* Make sure the pipe isn't still relying on us */
1598         assert_pipe_disabled(dev_priv, pipe);
1599
1600         val = DPLL_INTEGRATED_REF_CLK_VLV |
1601                 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1602         if (pipe != PIPE_A)
1603                 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1604
1605         I915_WRITE(DPLL(pipe), val);
1606         POSTING_READ(DPLL(pipe));
1607 }
1608
/*
 * chv_disable_pll - disable the DPLL for @pipe on CHV
 *
 * Besides writing the DPLL control register back to its quiescent
 * value, this also drops the 10bit DPIO clock going to the display
 * controller, which requires a sideband write under sb_lock.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Keep the ref clock running, VGA mode disabled */
	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1634
/*
 * vlv_wait_port_ready - wait for a VLV/CHV digital port to signal ready
 * @dev_priv: i915 device
 * @dport: digital port to wait on
 * @expected_mask: readiness bits expected in the status register
 *
 * Polls the port-ready bits (in DPLL(0) for ports B/C, DPIO_PHY_STATUS
 * for port D) and WARNs on timeout. Port C shares the same register as
 * port B, hence its expected mask is shifted by 4.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C status lives 4 bits above port B's */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);
}
1667
1668 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1669                                            enum pipe pipe)
1670 {
1671         struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
1672                                                                 pipe);
1673         i915_reg_t reg;
1674         uint32_t val, pipeconf_val;
1675
1676         /* Make sure PCH DPLL is enabled */
1677         assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1678
1679         /* FDI must be feeding us bits for PCH ports */
1680         assert_fdi_tx_enabled(dev_priv, pipe);
1681         assert_fdi_rx_enabled(dev_priv, pipe);
1682
1683         if (HAS_PCH_CPT(dev_priv)) {
1684                 /* Workaround: Set the timing override bit before enabling the
1685                  * pch transcoder. */
1686                 reg = TRANS_CHICKEN2(pipe);
1687                 val = I915_READ(reg);
1688                 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1689                 I915_WRITE(reg, val);
1690         }
1691
1692         reg = PCH_TRANSCONF(pipe);
1693         val = I915_READ(reg);
1694         pipeconf_val = I915_READ(PIPECONF(pipe));
1695
1696         if (HAS_PCH_IBX(dev_priv)) {
1697                 /*
1698                  * Make the BPC in transcoder be consistent with
1699                  * that in pipeconf reg. For HDMI we must use 8bpc
1700                  * here for both 8bpc and 12bpc.
1701                  */
1702                 val &= ~PIPECONF_BPC_MASK;
1703                 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1704                         val |= PIPECONF_8BPC;
1705                 else
1706                         val |= pipeconf_val & PIPECONF_BPC_MASK;
1707         }
1708
1709         val &= ~TRANS_INTERLACE_MASK;
1710         if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1711                 if (HAS_PCH_IBX(dev_priv) &&
1712                     intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1713                         val |= TRANS_LEGACY_INTERLACED_ILK;
1714                 else
1715                         val |= TRANS_INTERLACED;
1716         else
1717                 val |= TRANS_PROGRESSIVE;
1718
1719         I915_WRITE(reg, val | TRANS_ENABLE);
1720         if (intel_wait_for_register(dev_priv,
1721                                     reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1722                                     100))
1723                 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1724 }
1725
/*
 * lpt_enable_pch_transcoder - enable the single LPT PCH transcoder
 *
 * On LPT the FDI RX side is asserted on pipe A while FDI TX follows
 * @cpu_transcoder. Sets the timing-override chicken bit (workaround),
 * mirrors the CPU pipe's interlace mode, then waits for the transcoder
 * to report enabled.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Propagate the CPU pipe's interlace mode to the PCH transcoder */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1757
/*
 * ironlake_disable_pch_transcoder - disable the PCH transcoder for @pipe
 *
 * FDI and all PCH ports on the transcoder must already be off. Waits
 * for the transcoder to report disabled, then (on CPT) clears the
 * timing-override chicken bit that was set at enable time.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1789
/*
 * lpt_disable_pch_transcoder - disable the single LPT PCH transcoder
 *
 * Clears TRANS_ENABLE, waits for the transcoder to report disabled,
 * then clears the timing-override chicken bit set at enable time.
 */
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1808
1809 enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
1810 {
1811         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1812
1813         if (HAS_PCH_LPT(dev_priv))
1814                 return PIPE_A;
1815         else
1816                 return crtc->pipe;
1817 }
1818
/*
 * intel_enable_pipe - enable a pipe/transcoder
 * @new_crtc_state: the state being committed for the pipe
 *
 * Asserts the clock prerequisites (pipe or DSI PLL on GMCH platforms,
 * FDI PLLs when driving the PCH), then sets PIPECONF_ENABLE. On i830
 * both pipes are kept enabled permanently, so finding the pipe already
 * enabled there is expected. On platforms without a HW frame counter
 * the function waits for the scanline to start moving so vblank
 * timestamps are sane before drm_crtc_vblank_on().
 */
static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		WARN_ON(!IS_I830(dev_priv));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (dev_priv->drm.max_vblank_count == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
1874
/*
 * intel_disable_pipe - disable a pipe/transcoder
 * @old_crtc_state: the state being torn down for the pipe
 *
 * Planes must already be off. Clears double-wide mode when it was in
 * use, and clears PIPECONF_ENABLE except on i830 (which keeps both
 * pipes running); only when the enable bit was actually cleared do we
 * wait for the pipe to stop.
 */
static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
1912
/* GTT tile size in bytes: 2KiB on gen2, 4KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
1917
1918 static unsigned int
1919 intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
1920 {
1921         struct drm_i915_private *dev_priv = to_i915(fb->dev);
1922         unsigned int cpp = fb->format->cpp[plane];
1923
1924         switch (fb->modifier) {
1925         case DRM_FORMAT_MOD_LINEAR:
1926                 return cpp;
1927         case I915_FORMAT_MOD_X_TILED:
1928                 if (IS_GEN2(dev_priv))
1929                         return 128;
1930                 else
1931                         return 512;
1932         case I915_FORMAT_MOD_Y_TILED_CCS:
1933                 if (plane == 1)
1934                         return 128;
1935                 /* fall through */
1936         case I915_FORMAT_MOD_Y_TILED:
1937                 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
1938                         return 128;
1939                 else
1940                         return 512;
1941         case I915_FORMAT_MOD_Yf_TILED_CCS:
1942                 if (plane == 1)
1943                         return 128;
1944                 /* fall through */
1945         case I915_FORMAT_MOD_Yf_TILED:
1946                 switch (cpp) {
1947                 case 1:
1948                         return 64;
1949                 case 2:
1950                 case 4:
1951                         return 128;
1952                 case 8:
1953                 case 16:
1954                         return 256;
1955                 default:
1956                         MISSING_CASE(cpp);
1957                         return cpp;
1958                 }
1959                 break;
1960         default:
1961                 MISSING_CASE(fb->modifier);
1962                 return cpp;
1963         }
1964 }
1965
1966 static unsigned int
1967 intel_tile_height(const struct drm_framebuffer *fb, int plane)
1968 {
1969         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
1970                 return 1;
1971         else
1972                 return intel_tile_size(to_i915(fb->dev)) /
1973                         intel_tile_width_bytes(fb, plane);
1974 }
1975
1976 /* Return the tile dimensions in pixel units */
1977 static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
1978                             unsigned int *tile_width,
1979                             unsigned int *tile_height)
1980 {
1981         unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
1982         unsigned int cpp = fb->format->cpp[plane];
1983
1984         *tile_width = tile_width_bytes / cpp;
1985         *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
1986 }
1987
/* Pad @height up to a whole number of tile rows for this fb plane. */
unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int plane, unsigned int height)
{
	return ALIGN(height, intel_tile_height(fb, plane));
}
1996
1997 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1998 {
1999         unsigned int size = 0;
2000         int i;
2001
2002         for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2003                 size += rot_info->plane[i].width * rot_info->plane[i].height;
2004
2005         return size;
2006 }
2007
2008 static void
2009 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2010                         const struct drm_framebuffer *fb,
2011                         unsigned int rotation)
2012 {
2013         view->type = I915_GGTT_VIEW_NORMAL;
2014         if (drm_rotation_90_or_270(rotation)) {
2015                 view->type = I915_GGTT_VIEW_ROTATED;
2016                 view->rotated = to_intel_framebuffer(fb)->rot_info;
2017         }
2018 }
2019
/* Required GGTT alignment for cursor surfaces, per platform. */
static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv))
		return 16 * 1024;

	if (IS_I85X(dev_priv))
		return 256;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
		return 32;

	return 4 * 1024;
}
2031
/* Required GGTT alignment for linear scanout surfaces, per platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 9)
		return 256 * 1024;

	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;

	if (INTEL_GEN(dev_priv) >= 4)
		return 4 * 1024;

	return 0;
}
2044
2045 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
2046                                          int plane)
2047 {
2048         struct drm_i915_private *dev_priv = to_i915(fb->dev);
2049
2050         /* AUX_DIST needs only 4K alignment */
2051         if (plane == 1)
2052                 return 4096;
2053
2054         switch (fb->modifier) {
2055         case DRM_FORMAT_MOD_LINEAR:
2056                 return intel_linear_alignment(dev_priv);
2057         case I915_FORMAT_MOD_X_TILED:
2058                 if (INTEL_GEN(dev_priv) >= 9)
2059                         return 256 * 1024;
2060                 return 0;
2061         case I915_FORMAT_MOD_Y_TILED_CCS:
2062         case I915_FORMAT_MOD_Yf_TILED_CCS:
2063         case I915_FORMAT_MOD_Y_TILED:
2064         case I915_FORMAT_MOD_Yf_TILED:
2065                 return 1 * 1024 * 1024;
2066         default:
2067                 MISSING_CASE(fb->modifier);
2068                 return 0;
2069         }
2070 }
2071
2072 static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2073 {
2074         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2075         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2076
2077         return INTEL_GEN(dev_priv) < 4 || plane->has_fbc;
2078 }
2079
/**
 * intel_pin_and_fence_fb_obj - pin a framebuffer for scanout
 * @fb: framebuffer to pin
 * @rotation: plane rotation, selects the normal vs rotated GGTT view
 * @uses_fence: whether the plane wants a fence for this fb
 * @out_flags: PLANE_HAS_FENCE is OR'ed in if a fence was installed
 *
 * Pins @fb's backing object into the GGTT with the alignment required
 * for scanout and, when possible, installs a fence register for tiled
 * scan-out. Caller must hold struct_mutex. Returns the pinned VMA
 * with an extra reference taken, or an ERR_PTR on failure.
 */
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(fb, 0);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	pinctl = 0;

	/* Valleyview is definitely limited to scanning out the first
	 * 512MiB. Lets presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		pinctl |= PIN_MAPPABLE;

	vma = i915_gem_object_pin_to_display_plane(obj,
						   alignment, &view, pinctl);
	if (IS_ERR(vma))
		goto err;

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		int ret;

		/* Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
			/* pre-gen4: no fence means no scanout, so bail out */
			i915_gem_object_unpin_from_display_plane(vma);
			vma = ERR_PTR(ret);
			goto err;
		}

		if (ret == 0 && vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	/* Extra reference for the caller; dropped in intel_unpin_fb_vma() */
	i915_vma_get(vma);
err:
	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);

	intel_runtime_pm_put(dev_priv);
	return vma;
}
2173
/*
 * intel_unpin_fb_vma - undo intel_pin_and_fence_fb_obj()
 * @vma: the framebuffer VMA to release
 * @flags: the PLANE_HAS_FENCE flags returned at pin time
 *
 * Must be called with struct_mutex held. Releases the fence (if one
 * was installed), the display-plane pin and the VMA reference, in the
 * reverse order of intel_pin_and_fence_fb_obj().
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
	i915_vma_put(vma);
}
2183
2184 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
2185                           unsigned int rotation)
2186 {
2187         if (drm_rotation_90_or_270(rotation))
2188                 return to_intel_framebuffer(fb)->rotated[plane].pitch;
2189         else
2190                 return fb->pitches[plane];
2191 }
2192
2193 /*
2194  * Convert the x/y offsets into a linear offset.
2195  * Only valid with 0/180 degree rotation, which is fine since linear
2196  * offset is only used with linear buffers on pre-hsw and tiled buffers
2197  * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2198  */
2199 u32 intel_fb_xy_to_linear(int x, int y,
2200                           const struct intel_plane_state *state,
2201                           int plane)
2202 {
2203         const struct drm_framebuffer *fb = state->base.fb;
2204         unsigned int cpp = fb->format->cpp[plane];
2205         unsigned int pitch = fb->pitches[plane];
2206
2207         return y * pitch + x * cpp;
2208 }
2209
2210 /*
2211  * Add the x/y offsets derived from fb->offsets[] to the user
2212  * specified plane src x/y offsets. The resulting x/y offsets
2213  * specify the start of scanout from the beginning of the gtt mapping.
2214  */
2215 void intel_add_fb_offsets(int *x, int *y,
2216                           const struct intel_plane_state *state,
2217                           int plane)
2218
2219 {
2220         const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
2221         unsigned int rotation = state->base.rotation;
2222
2223         if (drm_rotation_90_or_270(rotation)) {
2224                 *x += intel_fb->rotated[plane].x;
2225                 *y += intel_fb->rotated[plane].y;
2226         } else {
2227                 *x += intel_fb->normal[plane].x;
2228                 *y += intel_fb->normal[plane].y;
2229         }
2230 }
2231
2232 static u32 __intel_adjust_tile_offset(int *x, int *y,
2233                                       unsigned int tile_width,
2234                                       unsigned int tile_height,
2235                                       unsigned int tile_size,
2236                                       unsigned int pitch_tiles,
2237                                       u32 old_offset,
2238                                       u32 new_offset)
2239 {
2240         unsigned int pitch_pixels = pitch_tiles * tile_width;
2241         unsigned int tiles;
2242
2243         WARN_ON(old_offset & (tile_size - 1));
2244         WARN_ON(new_offset & (tile_size - 1));
2245         WARN_ON(new_offset > old_offset);
2246
2247         tiles = (old_offset - new_offset) / tile_size;
2248
2249         *y += tiles / pitch_tiles * tile_height;
2250         *x += tiles % pitch_tiles * tile_width;
2251
2252         /* minimize x in case it got needlessly big */
2253         *y += *x / pitch_pixels * tile_height;
2254         *x %= pitch_pixels;
2255
2256         return new_offset;
2257 }
2258
/*
 * _intel_adjust_tile_offset - fold an offset delta into x/y coordinates
 *
 * Converts the difference between @old_offset and @new_offset into
 * adjustments of *@x/*@y so that (@new_offset, x, y) addresses the
 * same pixel that (@old_offset, x, y) did. For tiled layouts the
 * per-tile geometry is used (swapping width/height for 90/270
 * rotation); for linear buffers the delta is folded via the byte
 * pitch. Returns @new_offset.
 */
static u32 _intel_adjust_tile_offset(int *x, int *y,
				     const struct drm_framebuffer *fb, int plane,
				     unsigned int rotation,
				     u32 old_offset, u32 new_offset)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[plane];
	unsigned int pitch = intel_fb_pitch(fb, plane, rotation);

	WARN_ON(new_offset > old_offset);

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile_height units */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
					   tile_size, pitch_tiles,
					   old_offset, new_offset);
	} else {
		/* linear: fold the byte delta straight into rows + pixels */
		old_offset += *y * pitch + *x * cpp;

		*y = (old_offset - new_offset) / pitch;
		*x = ((old_offset - new_offset) - *y * pitch) / cpp;
	}

	return new_offset;
}
2296
2297 /*
2298  * Adjust the tile offset by moving the difference into
2299  * the x/y offsets.
2300  */
2301 static u32 intel_adjust_tile_offset(int *x, int *y,
2302                                     const struct intel_plane_state *state, int plane,
2303                                     u32 old_offset, u32 new_offset)
2304 {
2305         return _intel_adjust_tile_offset(x, y, state->base.fb, plane,
2306                                          state->base.rotation,
2307                                          old_offset, new_offset);
2308 }
2309
2310 /*
2311  * Computes the linear offset to the base tile and adjusts
2312  * x, y. bytes per pixel is assumed to be a power-of-two.
2313  *
2314  * In the 90/270 rotated case, x and y are assumed
2315  * to be already rotated to match the rotated GTT view, and
2316  * pitch is the tile_height aligned framebuffer height.
2317  *
2318  * This function is used when computing the derived information
2319  * under intel_framebuffer, so using any of that information
2320  * here is not allowed. Anything under drm_framebuffer can be
2321  * used. This is why the user has to pass in the pitch since it
2322  * is specified in the rotated orientation.
2323  */
static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
				      int *x, int *y,
				      const struct drm_framebuffer *fb, int plane,
				      unsigned int pitch,
				      unsigned int rotation,
				      u32 alignment)
{
	uint64_t fb_modifier = fb->modifier;
	unsigned int cpp = fb->format->cpp[plane];
	u32 offset, offset_aligned;

	/* turn the power-of-two alignment into a bitmask (0 stays 0) */
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(fb, plane, &tile_width, &tile_height);

		if (drm_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile_height units */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* reduce x/y to intra-tile coordinates */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* fold the sub-alignment remainder back into x/y */
		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
					   tile_size, pitch_tiles,
					   offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2374
2375 u32 intel_compute_tile_offset(int *x, int *y,
2376                               const struct intel_plane_state *state,
2377                               int plane)
2378 {
2379         struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2380         struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2381         const struct drm_framebuffer *fb = state->base.fb;
2382         unsigned int rotation = state->base.rotation;
2383         int pitch = intel_fb_pitch(fb, plane, rotation);
2384         u32 alignment;
2385
2386         if (intel_plane->id == PLANE_CURSOR)
2387                 alignment = intel_cursor_alignment(dev_priv);
2388         else
2389                 alignment = intel_surf_alignment(fb, plane);
2390
2391         return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
2392                                           rotation, alignment);
2393 }
2394
/* Convert the fb->offset[] into x/y offsets */
static int intel_fb_offset_to_xy(int *x, int *y,
                                 const struct drm_framebuffer *fb, int plane)
{
        struct drm_i915_private *dev_priv = to_i915(fb->dev);

        /* Tiled surfaces must start on a tile boundary. */
        if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
            fb->offsets[plane] % intel_tile_size(dev_priv))
                return -EINVAL;

        *x = 0;
        *y = 0;

        /* Push the whole byte offset into the x/y coordinates. */
        _intel_adjust_tile_offset(x, y,
                                  fb, plane, DRM_MODE_ROTATE_0,
                                  fb->offsets[plane], 0);

        return 0;
}
2414
2415 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
2416 {
2417         switch (fb_modifier) {
2418         case I915_FORMAT_MOD_X_TILED:
2419                 return I915_TILING_X;
2420         case I915_FORMAT_MOD_Y_TILED:
2421         case I915_FORMAT_MOD_Y_TILED_CCS:
2422                 return I915_TILING_Y;
2423         default:
2424                 return I915_TILING_NONE;
2425         }
2426 }
2427
/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refers to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
/* Layout info for the formats that have CCS support: all are 2-plane
 * (main surface + 1 byte-per-8x16-pixels CCS plane) 8:8:8:8 formats. */
static const struct drm_format_info ccs_formats[] = {
        { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
        { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
};
2448
2449 static const struct drm_format_info *
2450 lookup_format_info(const struct drm_format_info formats[],
2451                    int num_formats, u32 format)
2452 {
2453         int i;
2454
2455         for (i = 0; i < num_formats; i++) {
2456                 if (formats[i].format == format)
2457                         return &formats[i];
2458         }
2459
2460         return NULL;
2461 }
2462
2463 static const struct drm_format_info *
2464 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2465 {
2466         switch (cmd->modifier[0]) {
2467         case I915_FORMAT_MOD_Y_TILED_CCS:
2468         case I915_FORMAT_MOD_Yf_TILED_CCS:
2469                 return lookup_format_info(ccs_formats,
2470                                           ARRAY_SIZE(ccs_formats),
2471                                           cmd->pixel_format);
2472         default:
2473                 return NULL;
2474         }
2475 }
2476
/*
 * Fill out the derived per-plane layout information for @fb: the
 * normal and rotated x/y offsets, the rotation_info used for the
 * rotated GTT view, and the rotated pitches. Also validates that the
 * fb offsets are usable and that the whole layout fits inside the
 * backing object.
 *
 * Returns 0 on success or a negative error code on an invalid layout.
 */
static int
intel_fill_fb_info(struct drm_i915_private *dev_priv,
                   struct drm_framebuffer *fb)
{
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
        struct intel_rotation_info *rot_info = &intel_fb->rot_info;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        u32 gtt_offset_rotated = 0; /* running tile offset into the rotated view */
        unsigned int max_size = 0;  /* in tiles */
        int i, num_planes = fb->format->num_planes;
        unsigned int tile_size = intel_tile_size(dev_priv);

        for (i = 0; i < num_planes; i++) {
                unsigned int width, height;
                unsigned int cpp, size;
                u32 offset;
                int x, y;
                int ret;

                cpp = fb->format->cpp[i];
                width = drm_framebuffer_plane_width(fb->width, fb, i);
                height = drm_framebuffer_plane_height(fb->height, fb, i);

                /* Turn fb->offsets[i] into x/y coordinates. */
                ret = intel_fb_offset_to_xy(&x, &y, fb, i);
                if (ret) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return ret;
                }

                /* Extra validation for the CCS AUX plane (plane 1). */
                if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
                     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
                        int hsub = fb->format->hsub;
                        int vsub = fb->format->vsub;
                        int tile_width, tile_height;
                        int main_x, main_y;
                        int ccs_x, ccs_y;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);
                        /* Scale the CCS tile up to main surface pixel units. */
                        tile_width *= hsub;
                        tile_height *= vsub;

                        ccs_x = (x * hsub) % tile_width;
                        ccs_y = (y * vsub) % tile_height;
                        main_x = intel_fb->normal[0].x % tile_width;
                        main_y = intel_fb->normal[0].y % tile_height;

                        /*
                         * CCS doesn't have its own x/y offset register, so the intra CCS tile
                         * x/y offsets must match between CCS and the main surface.
                         */
                        if (main_x != ccs_x || main_y != ccs_y) {
                                DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
                                              main_x, main_y,
                                              ccs_x, ccs_y,
                                              intel_fb->normal[0].x,
                                              intel_fb->normal[0].y,
                                              x, y);
                                return -EINVAL;
                        }
                }

                /*
                 * The fence (if used) is aligned to the start of the object
                 * so having the framebuffer wrap around across the edge of the
                 * fenced region doesn't really work. We have no API to configure
                 * the fence start offset within the object (nor could we probably
                 * on gen2/3). So it's just easier if we just require that the
                 * fb layout agrees with the fence layout. We already check that the
                 * fb stride matches the fence stride elsewhere.
                 */
                if (i == 0 && i915_gem_object_is_tiled(obj) &&
                    (x + width) * cpp > fb->pitches[i]) {
                        DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
                                      i, fb->offsets[i]);
                        return -EINVAL;
                }

                /*
                 * First pixel of the framebuffer from
                 * the start of the normal gtt mapping.
                 */
                intel_fb->normal[i].x = x;
                intel_fb->normal[i].y = y;

                offset = _intel_compute_tile_offset(dev_priv, &x, &y,
                                                    fb, i, fb->pitches[i],
                                                    DRM_MODE_ROTATE_0, tile_size);
                /* offset is now in whole tiles, not bytes */
                offset /= tile_size;

                if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
                        unsigned int tile_width, tile_height;
                        unsigned int pitch_tiles;
                        struct drm_rect r;

                        intel_tile_dims(fb, i, &tile_width, &tile_height);

                        rot_info->plane[i].offset = offset;
                        rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
                        rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
                        rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);

                        intel_fb->rotated[i].pitch =
                                rot_info->plane[i].height * tile_height;

                        /* how many tiles does this plane need */
                        size = rot_info->plane[i].stride * rot_info->plane[i].height;
                        /*
                         * If the plane isn't horizontally tile aligned,
                         * we need one more tile.
                         */
                        if (x != 0)
                                size++;

                        /* rotate the x/y offsets to match the GTT view */
                        r.x1 = x;
                        r.y1 = y;
                        r.x2 = x + width;
                        r.y2 = y + height;
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
                                        DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;

                        /* rotate the tile dimensions to match the GTT view */
                        pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
                        swap(tile_width, tile_height);

                        /*
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
                        __intel_adjust_tile_offset(&x, &y,
                                                   tile_width, tile_height,
                                                   tile_size, pitch_tiles,
                                                   gtt_offset_rotated * tile_size, 0);

                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;

                        /*
                         * First pixel of the framebuffer from
                         * the start of the rotated gtt mapping.
                         */
                        intel_fb->rotated[i].x = x;
                        intel_fb->rotated[i].y = y;
                } else {
                        /* Linear: last byte of the plane, rounded up to tiles. */
                        size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
                                            x * cpp, tile_size);
                }

                /* how many tiles in total needed in the bo */
                max_size = max(max_size, offset + size);
        }

        if (max_size * tile_size > obj->base.size) {
                DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
                              max_size * tile_size, obj->base.size);
                return -EINVAL;
        }

        return 0;
}
2641
/*
 * Translate a gen2+ DISPPLANE_* pixel format field to the matching
 * fourcc. Note the default label deliberately shares the
 * DISPPLANE_BGRX888 case, so unknown HW values map to XRGB8888.
 */
static int i9xx_format_to_fourcc(int format)
{
        switch (format) {
        case DISPPLANE_8BPP:
                return DRM_FORMAT_C8;
        case DISPPLANE_BGRX555:
                return DRM_FORMAT_XRGB1555;
        case DISPPLANE_BGRX565:
                return DRM_FORMAT_RGB565;
        /* unknown formats are treated the same as BGRX888 */
        default:
        case DISPPLANE_BGRX888:
                return DRM_FORMAT_XRGB8888;
        case DISPPLANE_RGBX888:
                return DRM_FORMAT_XBGR8888;
        case DISPPLANE_BGRX101010:
                return DRM_FORMAT_XRGB2101010;
        case DISPPLANE_RGBX101010:
                return DRM_FORMAT_XBGR2101010;
        }
}
2662
/*
 * Translate a SKL+ PLANE_CTL format field, together with the RGB
 * channel order and alpha flags, to the matching fourcc. The default
 * label deliberately shares the PLANE_CTL_FORMAT_XRGB_8888 case, so
 * unknown HW values fall back to the 8:8:8:8 handling.
 */
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
        switch (format) {
        case PLANE_CTL_FORMAT_RGB_565:
                return DRM_FORMAT_RGB565;
        case PLANE_CTL_FORMAT_NV12:
                return DRM_FORMAT_NV12;
        default:
        case PLANE_CTL_FORMAT_XRGB_8888:
                if (rgb_order) {
                        if (alpha)
                                return DRM_FORMAT_ABGR8888;
                        else
                                return DRM_FORMAT_XBGR8888;
                } else {
                        if (alpha)
                                return DRM_FORMAT_ARGB8888;
                        else
                                return DRM_FORMAT_XRGB8888;
                }
        case PLANE_CTL_FORMAT_XRGB_2101010:
                if (rgb_order)
                        return DRM_FORMAT_XBGR2101010;
                else
                        return DRM_FORMAT_XRGB2101010;
        }
}
2690
/*
 * Try to wrap the framebuffer the BIOS programmed (preallocated in
 * stolen memory, described by @plane_config) in a GEM object and
 * initialize the intel_framebuffer around it.
 *
 * Returns true on success, false if the BIOS fb can't be reused (too
 * big, zero sized, or the stolen object couldn't be created).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = { 0 };
        struct drm_framebuffer *fb = &plane_config->fb->base;
        /* Expand [base, base+size) to whole pages. */
        u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
        u32 size_aligned = round_up(plane_config->base + plane_config->size,
                                    PAGE_SIZE);

        size_aligned -= base_aligned;

        if (plane_config->size == 0)
                return false;

        /* If the FB is too big, just don't use it since fbdev is not very
         * important and we should probably use that space with FBC or other
         * features. */
        if (size_aligned * 2 > dev_priv->stolen_usable_size)
                return false;

        mutex_lock(&dev->struct_mutex);
        obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
                                                             base_aligned,
                                                             base_aligned,
                                                             size_aligned);
        mutex_unlock(&dev->struct_mutex);
        if (!obj)
                return false;

        /* Propagate the BIOS tiling setup onto the new object. */
        if (plane_config->tiling == I915_TILING_X)
                obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;

        mode_cmd.pixel_format = fb->format->format;
        mode_cmd.width = fb->width;
        mode_cmd.height = fb->height;
        mode_cmd.pitches[0] = fb->pitches[0];
        mode_cmd.modifier[0] = fb->modifier;
        mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

        if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
                DRM_DEBUG_KMS("intel fb init failed\n");
                goto out_unref_obj;
        }


        DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
        return true;

out_unref_obj:
        i915_gem_object_put(obj);
        return false;
}
2747
2748 static void
2749 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
2750                         struct intel_plane_state *plane_state,
2751                         bool visible)
2752 {
2753         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2754
2755         plane_state->base.visible = visible;
2756
2757         /* FIXME pre-g4x don't work like this */
2758         if (visible) {
2759                 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
2760                 crtc_state->active_planes |= BIT(plane->id);
2761         } else {
2762                 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
2763                 crtc_state->active_planes &= ~BIT(plane->id);
2764         }
2765
2766         DRM_DEBUG_KMS("%s active planes 0x%x\n",
2767                       crtc_state->base.crtc->name,
2768                       crtc_state->active_planes);
2769 }
2770
/*
 * Disable @plane on @crtc outside of the atomic commit machinery,
 * updating the current plane/crtc software state to match (used e.g.
 * when the BIOS fb can't be taken over during initial readout).
 */
static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
                                         struct intel_plane *plane)
{
        struct intel_crtc_state *crtc_state =
                to_intel_crtc_state(crtc->base.state);
        struct intel_plane_state *plane_state =
                to_intel_plane_state(plane->base.state);

        intel_set_plane_visible(crtc_state, plane_state, false);

        /* The primary plane needs extra bookkeeping before it goes away. */
        if (plane->id == PLANE_PRIMARY)
                intel_pre_disable_primary_noatomic(&crtc->base);

        trace_intel_disable_plane(&plane->base, crtc);
        plane->disable_plane(plane, crtc);
}
2787
/*
 * Take over the framebuffer the BIOS left enabled on this CRTC:
 * either wrap its stolen memory in a new GEM object, or share the fb
 * another active CRTC is already scanning out from the same GTT base.
 * If neither works, disable the primary plane so software state stays
 * consistent with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
                             struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
        struct drm_crtc_state *crtc_state = intel_crtc->base.state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;

        if (!plane_config->fb)
                return;

        if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
                fb = &plane_config->fb->base;
                goto valid_fb;
        }

        /* Allocation failed; the intel_framebuffer wrapper is unused. */
        kfree(plane_config->fb);

        /*
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
                struct intel_plane_state *state;

                if (c == &intel_crtc->base)
                        continue;

                if (!to_intel_crtc(c)->active)
                        continue;

                state = to_intel_plane_state(c->primary->state);
                if (!state->vma)
                        continue;

                if (intel_plane_ggtt_offset(state) == plane_config->base) {
                        fb = state->base.fb;
                        drm_framebuffer_get(fb);
                        goto valid_fb;
                }
        }

        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * indicates that the primary plane is visible, but has a NULL FB,
         * which will lead to problems later if we don't fix it up.  The
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
        intel_plane_disable_noatomic(intel_crtc, intel_plane);

        return;

valid_fb:
        mutex_lock(&dev->struct_mutex);
        intel_state->vma =
                intel_pin_and_fence_fb_obj(fb,
                                           primary->state->rotation,
                                           intel_plane_uses_fence(intel_state),
                                           &intel_state->flags);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(intel_state->vma)) {
                DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
                          intel_crtc->pipe, PTR_ERR(intel_state->vma));

                intel_state->vma = NULL;
                drm_framebuffer_put(fb);
                return;
        }

        obj = intel_fb_obj(fb);
        intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

        /* Scan out the full fb, unscaled, at 0,0. */
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
        plane_state->src_h = fb->height << 16;

        plane_state->crtc_x = 0;
        plane_state->crtc_y = 0;
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;

        intel_state->base.src = drm_plane_state_src(plane_state);
        intel_state->base.dst = drm_plane_state_dest(plane_state);

        if (i915_gem_object_is_tiled(obj))
                dev_priv->preserve_bios_swizzle = true;

        plane_state->fb = fb;
        plane_state->crtc = &intel_crtc->base;

        intel_set_plane_visible(to_intel_crtc_state(crtc_state),
                                to_intel_plane_state(plane_state),
                                true);

        atomic_or(to_intel_plane(primary)->frontbuffer_bit,
                  &obj->frontbuffer_bits);
}
2895
/*
 * Maximum source width (in pixels) for a SKL+ plane, given the fb
 * modifier and bytes per pixel. @rotation is currently unused here
 * but kept for symmetry with the callers. Falls back to 2048 for
 * unknown modifier/cpp combinations.
 */
static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
                               unsigned int rotation)
{
        int cpp = fb->format->cpp[plane];

        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
                switch (cpp) {
                case 8:
                        return 4096;
                case 4:
                case 2:
                case 1:
                        return 8192;
                default:
                        MISSING_CASE(cpp);
                        break;
                }
                break;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                /* FIXME AUX plane? */
                /* fall through: CCS uses the Y/Yf tiled limits */
        case I915_FORMAT_MOD_Y_TILED:
        case I915_FORMAT_MOD_Yf_TILED:
                switch (cpp) {
                case 8:
                        return 2048;
                case 4:
                        return 4096;
                case 2:
                case 1:
                        return 8192;
                default:
                        MISSING_CASE(cpp);
                        break;
                }
                break;
        default:
                MISSING_CASE(fb->modifier);
        }

        /* conservative fallback */
        return 2048;
}
2940
/*
 * The CCS AUX surface has no x/y offset registers of its own, so its
 * intra-tile x/y must end up equal to the main surface's. Walk the
 * AUX offset downwards, one alignment step at a time, converting each
 * step into x/y adjustments, until the coordinates match (or we run
 * out of room). On success the adjusted AUX offset/x/y are written
 * back into @plane_state and true is returned.
 */
static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
                                           int main_x, int main_y, u32 main_offset)
{
        const struct drm_framebuffer *fb = plane_state->base.fb;
        int hsub = fb->format->hsub;
        int vsub = fb->format->vsub;
        int aux_x = plane_state->aux.x;
        int aux_y = plane_state->aux.y;
        u32 aux_offset = plane_state->aux.offset;
        u32 alignment = intel_surf_alignment(fb, 1);

        /* AUX offset must stay >= main offset, and y only grows as we go. */
        while (aux_offset >= main_offset && aux_y <= main_y) {
                int x, y;

                if (aux_x == main_x && aux_y == main_y)
                        break;

                if (aux_offset == 0)
                        break;

                /* Work in AUX surface units, then convert back to pixels. */
                x = aux_x / hsub;
                y = aux_y / vsub;
                aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1,
                                                      aux_offset, aux_offset - alignment);
                aux_x = x * hsub + aux_x % hsub;
                aux_y = y * vsub + aux_y % vsub;
        }

        if (aux_x != main_x || aux_y != main_y)
                return false;

        plane_state->aux.offset = aux_offset;
        plane_state->aux.x = aux_x;
        plane_state->aux.y = aux_y;

        return true;
}
2978
/*
 * Validate and finalize the main (Y/RGB) surface of a SKL+ plane:
 * check the source size limits, apply the GLK/CNL screen-edge
 * workaround, and compute an aligned surface offset plus x/y that
 * satisfies the X-tiled stride constraint and (for CCS fbs) matches
 * the AUX surface coordinates. The result is stored in
 * plane_state->main. Returns 0 or a negative error code.
 */
static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
                                  struct intel_plane_state *plane_state)
{
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        const struct drm_framebuffer *fb = plane_state->base.fb;
        unsigned int rotation = plane_state->base.rotation;
        int x = plane_state->base.src.x1 >> 16;
        int y = plane_state->base.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->base.src) >> 16;
        int h = drm_rect_height(&plane_state->base.src) >> 16;
        int dst_x = plane_state->base.dst.x1;
        int pipe_src_w = crtc_state->pipe_src_w;
        int max_width = skl_max_plane_width(fb, 0, rotation);
        int max_height = 4096;
        u32 alignment, offset, aux_offset = plane_state->aux.offset;

        if (w > max_width || h > max_height) {
                DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
                              w, h, max_width, max_height);
                return -EINVAL;
        }

        /*
         * Display WA #1175: cnl,glk
         * Planes other than the cursor may cause FIFO underflow and display
         * corruption if starting less than 4 pixels from the right edge of
         * the screen.
         * Besides the above WA fix the similar problem, where planes other
         * than the cursor ending less than 4 pixels from the left edge of the
         * screen may cause FIFO underflow and display corruption.
         */
        if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
            (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
                DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
                              dst_x + w < 4 ? "end" : "start",
                              dst_x + w < 4 ? dst_x + w : dst_x,
                              4, pipe_src_w - 4);
                return -ERANGE;
        }

        intel_add_fb_offsets(&x, &y, plane_state, 0);
        offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
        alignment = intel_surf_alignment(fb, 0);

        /*
         * AUX surface offset is specified as the distance from the
         * main surface offset, and it must be non-negative. Make
         * sure that is what we will get.
         */
        if (offset > aux_offset)
                offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                  offset, aux_offset & ~(alignment - 1));

        /*
         * When using an X-tiled surface, the plane blows up
         * if the x offset + width exceed the stride.
         *
         * TODO: linear and Y-tiled seem fine, Yf untested,
         */
        if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
                int cpp = fb->format->cpp[0];

                /* Walk the offset down until x fits within the stride. */
                while ((x + w) * cpp > fb->pitches[0]) {
                        if (offset == 0) {
                                DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
                                return -EINVAL;
                        }

                        offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                          offset, offset - alignment);
                }
        }

        /*
         * CCS AUX surface doesn't have its own x/y offsets, we must make sure
         * they match with the main surface x/y offsets.
         */
        if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
            fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
                while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
                        if (offset == 0)
                                break;

                        /* Try again from an earlier main surface offset. */
                        offset = intel_adjust_tile_offset(&x, &y, plane_state, 0,
                                                          offset, offset - alignment);
                }

                if (x != plane_state->aux.x || y != plane_state->aux.y) {
                        DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
                        return -EINVAL;
                }
        }

        plane_state->main.offset = offset;
        plane_state->main.x = x;
        plane_state->main.y = y;

        return 0;
}
3079
3080 static int
3081 skl_check_nv12_surface(const struct intel_crtc_state *crtc_state,
3082                        struct intel_plane_state *plane_state)
3083 {
3084         /* Display WA #1106 */
3085         if (plane_state->base.rotation !=
3086             (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
3087             plane_state->base.rotation != DRM_MODE_ROTATE_270)
3088                 return 0;
3089
3090         /*
3091          * src coordinates are rotated here.
3092          * We check height but report it as width
3093          */
3094         if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
3095                 DRM_DEBUG_KMS("src width must be multiple "
3096                               "of 4 for rotated NV12\n");
3097                 return -EINVAL;
3098         }
3099
3100         return 0;
3101 }
3102
3103 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3104 {
3105         const struct drm_framebuffer *fb = plane_state->base.fb;
3106         unsigned int rotation = plane_state->base.rotation;
3107         int max_width = skl_max_plane_width(fb, 1, rotation);
3108         int max_height = 4096;
3109         int x = plane_state->base.src.x1 >> 17;
3110         int y = plane_state->base.src.y1 >> 17;
3111         int w = drm_rect_width(&plane_state->base.src) >> 17;
3112         int h = drm_rect_height(&plane_state->base.src) >> 17;
3113         u32 offset;
3114
3115         intel_add_fb_offsets(&x, &y, plane_state, 1);
3116         offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
3117
3118         /* FIXME not quite sure how/if these apply to the chroma plane */
3119         if (w > max_width || h > max_height) {
3120                 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3121                               w, h, max_width, max_height);
3122                 return -EINVAL;
3123         }
3124
3125         plane_state->aux.offset = offset;
3126         plane_state->aux.x = x;
3127         plane_state->aux.y = y;
3128
3129         return 0;
3130 }
3131
3132 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3133 {
3134         const struct drm_framebuffer *fb = plane_state->base.fb;
3135         int src_x = plane_state->base.src.x1 >> 16;
3136         int src_y = plane_state->base.src.y1 >> 16;
3137         int hsub = fb->format->hsub;
3138         int vsub = fb->format->vsub;
3139         int x = src_x / hsub;
3140         int y = src_y / vsub;
3141         u32 offset;
3142
3143         if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) {
3144                 DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n",
3145                               plane_state->base.rotation);
3146                 return -EINVAL;
3147         }
3148
3149         intel_add_fb_offsets(&x, &y, plane_state, 1);
3150         offset = intel_compute_tile_offset(&x, &y, plane_state, 1);
3151
3152         plane_state->aux.offset = offset;
3153         plane_state->aux.x = x * hsub + src_x % hsub;
3154         plane_state->aux.y = y * vsub + src_y % vsub;
3155
3156         return 0;
3157 }
3158
3159 int skl_check_plane_surface(const struct intel_crtc_state *crtc_state,
3160                             struct intel_plane_state *plane_state)
3161 {
3162         const struct drm_framebuffer *fb = plane_state->base.fb;
3163         unsigned int rotation = plane_state->base.rotation;
3164         int ret;
3165
3166         if (rotation & DRM_MODE_REFLECT_X &&
3167             fb->modifier == DRM_FORMAT_MOD_LINEAR) {
3168                 DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
3169                 return -EINVAL;
3170         }
3171
3172         if (!plane_state->base.visible)
3173                 return 0;
3174
3175         /* Rotate src coordinates to match rotated GTT view */
3176         if (drm_rotation_90_or_270(rotation))
3177                 drm_rect_rotate(&plane_state->base.src,
3178                                 fb->width << 16, fb->height << 16,
3179                                 DRM_MODE_ROTATE_270);
3180
3181         /*
3182          * Handle the AUX surface first since
3183          * the main surface setup depends on it.
3184          */
3185         if (fb->format->format == DRM_FORMAT_NV12) {
3186                 ret = skl_check_nv12_surface(crtc_state, plane_state);
3187                 if (ret)
3188                         return ret;
3189                 ret = skl_check_nv12_aux_surface(plane_state);
3190                 if (ret)
3191                         return ret;
3192         } else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
3193                    fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) {
3194                 ret = skl_check_ccs_aux_surface(plane_state);
3195                 if (ret)
3196                         return ret;
3197         } else {
3198                 plane_state->aux.offset = ~0xfff;
3199                 plane_state->aux.x = 0;
3200                 plane_state->aux.y = 0;
3201         }
3202
3203         ret = skl_check_main_surface(crtc_state, plane_state);
3204         if (ret)
3205                 return ret;
3206
3207         return 0;
3208 }
3209
3210 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3211                           const struct intel_plane_state *plane_state)
3212 {
3213         struct drm_i915_private *dev_priv =
3214                 to_i915(plane_state->base.plane->dev);
3215         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3216         const struct drm_framebuffer *fb = plane_state->base.fb;
3217         unsigned int rotation = plane_state->base.rotation;
3218         u32 dspcntr;
3219
3220         dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
3221
3222         if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
3223             IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
3224                 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
3225
3226         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
3227                 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3228
3229         if (INTEL_GEN(dev_priv) < 5)
3230                 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3231
3232         switch (fb->format->format) {
3233         case DRM_FORMAT_C8:
3234                 dspcntr |= DISPPLANE_8BPP;
3235                 break;
3236         case DRM_FORMAT_XRGB1555:
3237                 dspcntr |= DISPPLANE_BGRX555;
3238                 break;
3239         case DRM_FORMAT_RGB565:
3240                 dspcntr |= DISPPLANE_BGRX565;
3241                 break;
3242         case DRM_FORMAT_XRGB8888:
3243                 dspcntr |= DISPPLANE_BGRX888;
3244                 break;
3245         case DRM_FORMAT_XBGR8888:
3246                 dspcntr |= DISPPLANE_RGBX888;
3247                 break;
3248         case DRM_FORMAT_XRGB2101010:
3249                 dspcntr |= DISPPLANE_BGRX101010;
3250                 break;
3251         case DRM_FORMAT_XBGR2101010:
3252                 dspcntr |= DISPPLANE_RGBX101010;
3253                 break;
3254         default:
3255                 MISSING_CASE(fb->format->format);
3256                 return 0;
3257         }
3258
3259         if (INTEL_GEN(dev_priv) >= 4 &&
3260             fb->modifier == I915_FORMAT_MOD_X_TILED)
3261                 dspcntr |= DISPPLANE_TILED;
3262
3263         if (rotation & DRM_MODE_ROTATE_180)
3264                 dspcntr |= DISPPLANE_ROTATE_180;
3265
3266         if (rotation & DRM_MODE_REFLECT_X)
3267                 dspcntr |= DISPPLANE_MIRROR;
3268
3269         return dspcntr;
3270 }
3271
3272 int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
3273 {
3274         struct drm_i915_private *dev_priv =
3275                 to_i915(plane_state->base.plane->dev);
3276         int src_x = plane_state->base.src.x1 >> 16;
3277         int src_y = plane_state->base.src.y1 >> 16;
3278         u32 offset;
3279
3280         intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
3281
3282         if (INTEL_GEN(dev_priv) >= 4)
3283                 offset = intel_compute_tile_offset(&src_x, &src_y,
3284                                                    plane_state, 0);
3285         else
3286                 offset = 0;
3287
3288         /* HSW/BDW do this automagically in hardware */
3289         if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
3290                 unsigned int rotation = plane_state->base.rotation;
3291                 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3292                 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3293
3294                 if (rotation & DRM_MODE_ROTATE_180) {
3295                         src_x += src_w - 1;
3296                         src_y += src_h - 1;
3297                 } else if (rotation & DRM_MODE_REFLECT_X) {
3298                         src_x += src_w - 1;
3299                 }
3300         }
3301
3302         plane_state->main.offset = offset;
3303         plane_state->main.x = src_x;
3304         plane_state->main.y = src_y;
3305
3306         return 0;
3307 }
3308
/*
 * Write the precomputed plane state (plane_state->ctl, main.x/y/offset)
 * to the gen2-8 primary plane registers. All writes use the _FW
 * accessors under uncore.lock so the register sequence is not
 * interleaved with other MMIO access.
 */
static void i9xx_update_plane(struct intel_plane *plane,
			      const struct intel_crtc_state *crtc_state,
			      const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	u32 linear_offset;
	u32 dspcntr = plane_state->ctl;
	i915_reg_t reg = DSPCNTR(i9xx_plane);
	int x = plane_state->main.x;
	int y = plane_state->main.y;
	unsigned long irqflags;
	u32 dspaddr_offset;

	linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);

	/* pre-gen4 uses the linear offset as the surface address offset */
	if (INTEL_GEN(dev_priv) >= 4)
		dspaddr_offset = plane_state->main.offset;
	else
		dspaddr_offset = linear_offset;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	if (INTEL_GEN(dev_priv) < 4) {
		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE_FW(DSPSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
	} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
		/* CHV pipe B primary plane has its own size/pos/alpha regs */
		I915_WRITE_FW(PRIMSIZE(i9xx_plane),
			      ((crtc_state->pipe_src_h - 1) << 16) |
			      (crtc_state->pipe_src_w - 1));
		I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
		I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
	}

	I915_WRITE_FW(reg, dspcntr);

	I915_WRITE_FW(DSPSTRIDE(i9xx_plane), fb->pitches[0]);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* HSW/BDW take the x/y offset via DSPOFFSET */
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
	} else if (INTEL_GEN(dev_priv) >= 4) {
		/* gen4+: tiled offset in DSPTILEOFF, linear in DSPLINOFF */
		I915_WRITE_FW(DSPSURF(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
		I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
		I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
	} else {
		/* gen2/3: everything goes through a single address register */
		I915_WRITE_FW(DSPADDR(i9xx_plane),
			      intel_plane_ggtt_offset(plane_state) +
			      dspaddr_offset);
	}
	POSTING_READ_FW(reg);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3372
/*
 * Disable the gen2-8 primary plane: clear DSPCNTR, then write a zero
 * surface/base address, under uncore.lock.
 */
static void i9xx_disable_plane(struct intel_plane *plane,
			       struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	I915_WRITE_FW(DSPCNTR(i9xx_plane), 0);
	/* gen4+ uses DSPSURF for the base address, older gens DSPADDR */
	if (INTEL_GEN(dev_priv) >= 4)
		I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
	else
		I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
	POSTING_READ_FW(DSPCNTR(i9xx_plane));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
3391
/*
 * Read back the hardware state of a gen2-8 primary plane.
 * Returns true if the plane is enabled and stores the pipe the plane
 * is currently assigned to in *pipe.
 */
static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum intel_display_power_domain power_domain;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	bool ret;
	u32 val;

	/*
	 * Not 100% correct for planes that can move between pipes,
	 * but that's only the case for gen2-4 which don't have any
	 * display power wells.
	 */
	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	val = I915_READ(DSPCNTR(i9xx_plane));

	ret = val & DISPLAY_PLANE_ENABLE;

	/* gen5+ planes are fixed to a pipe; older gens report it in DSPCNTR */
	if (INTEL_GEN(dev_priv) >= 5)
		*pipe = plane->pipe;
	else
		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;

	/* balance the power reference taken above */
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
3424
3425 static u32
3426 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
3427 {
3428         if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3429                 return 64;
3430         else
3431                 return intel_tile_width_bytes(fb, plane);
3432 }
3433
3434 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3435 {
3436         struct drm_device *dev = intel_crtc->base.dev;
3437         struct drm_i915_private *dev_priv = to_i915(dev);
3438
3439         I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3440         I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3441         I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
3442 }
3443
3444 /*
3445  * This function detaches (aka. unbinds) unused scalers in hardware
3446  */
3447 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
3448 {
3449         struct intel_crtc_scaler_state *scaler_state;
3450         int i;
3451
3452         scaler_state = &intel_crtc->config->scaler_state;
3453
3454         /* loop through and disable scalers that aren't in use */
3455         for (i = 0; i < intel_crtc->num_scalers; i++) {
3456                 if (!scaler_state->scalers[i].in_use)
3457                         skl_detach_scaler(intel_crtc, i);
3458         }
3459 }
3460
3461 u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
3462                      unsigned int rotation)
3463 {
3464         u32 stride;
3465
3466         if (plane >= fb->format->num_planes)
3467                 return 0;
3468
3469         stride = intel_fb_pitch(fb, plane, rotation);
3470
3471         /*
3472          * The stride is either expressed as a multiple of 64 bytes chunks for
3473          * linear buffers or in number of tiles for tiled buffers.
3474          */
3475         if (drm_rotation_90_or_270(rotation))
3476                 stride /= intel_tile_height(fb, plane);
3477         else
3478                 stride /= intel_fb_stride_alignment(fb, plane);
3479
3480         return stride;
3481 }
3482
3483 static u32 skl_plane_ctl_format(uint32_t pixel_format)
3484 {
3485         switch (pixel_format) {
3486         case DRM_FORMAT_C8:
3487                 return PLANE_CTL_FORMAT_INDEXED;
3488         case DRM_FORMAT_RGB565:
3489                 return PLANE_CTL_FORMAT_RGB_565;
3490         case DRM_FORMAT_XBGR8888:
3491         case DRM_FORMAT_ABGR8888:
3492                 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3493         case DRM_FORMAT_XRGB8888:
3494         case DRM_FORMAT_ARGB8888:
3495                 return PLANE_CTL_FORMAT_XRGB_8888;
3496         case DRM_FORMAT_XRGB2101010:
3497                 return PLANE_CTL_FORMAT_XRGB_2101010;
3498         case DRM_FORMAT_XBGR2101010:
3499                 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3500         case DRM_FORMAT_YUYV:
3501                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3502         case DRM_FORMAT_YVYU:
3503                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3504         case DRM_FORMAT_UYVY:
3505                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3506         case DRM_FORMAT_VYUY:
3507                 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3508         case DRM_FORMAT_NV12:
3509                 return PLANE_CTL_FORMAT_NV12;
3510         default:
3511                 MISSING_CASE(pixel_format);
3512         }
3513
3514         return 0;
3515 }
3516
3517 /*
3518  * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
3519  * to be already pre-multiplied. We need to add a knob (or a different
3520  * DRM_FORMAT) for user-space to configure that.
3521  */
3522 static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
3523 {
3524         switch (pixel_format) {
3525         case DRM_FORMAT_ABGR8888:
3526         case DRM_FORMAT_ARGB8888:
3527                 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3528         default:
3529                 return PLANE_CTL_ALPHA_DISABLE;
3530         }
3531 }
3532
3533 static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
3534 {
3535         switch (pixel_format) {
3536         case DRM_FORMAT_ABGR8888:
3537         case DRM_FORMAT_ARGB8888:
3538                 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
3539         default:
3540                 return PLANE_COLOR_ALPHA_DISABLE;
3541         }
3542 }
3543
3544 static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3545 {
3546         switch (fb_modifier) {
3547         case DRM_FORMAT_MOD_LINEAR:
3548                 break;
3549         case I915_FORMAT_MOD_X_TILED:
3550                 return PLANE_CTL_TILED_X;
3551         case I915_FORMAT_MOD_Y_TILED:
3552                 return PLANE_CTL_TILED_Y;
3553         case I915_FORMAT_MOD_Y_TILED_CCS:
3554                 return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
3555         case I915_FORMAT_MOD_Yf_TILED:
3556                 return PLANE_CTL_TILED_YF;
3557         case I915_FORMAT_MOD_Yf_TILED_CCS:
3558                 return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
3559         default:
3560                 MISSING_CASE(fb_modifier);
3561         }
3562
3563         return 0;
3564 }
3565
3566 static u32 skl_plane_ctl_rotate(unsigned int rotate)
3567 {
3568         switch (rotate) {
3569         case DRM_MODE_ROTATE_0:
3570                 break;
3571         /*
3572          * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
3573          * while i915 HW rotation is clockwise, thats why this swapping.
3574          */
3575         case DRM_MODE_ROTATE_90:
3576                 return PLANE_CTL_ROTATE_270;
3577         case DRM_MODE_ROTATE_180:
3578                 return PLANE_CTL_ROTATE_180;
3579         case DRM_MODE_ROTATE_270:
3580                 return PLANE_CTL_ROTATE_90;
3581         default:
3582                 MISSING_CASE(rotate);
3583         }
3584
3585         return 0;
3586 }
3587
3588 static u32 cnl_plane_ctl_flip(unsigned int reflect)
3589 {
3590         switch (reflect) {
3591         case 0:
3592                 break;
3593         case DRM_MODE_REFLECT_X:
3594                 return PLANE_CTL_FLIP_HORIZONTAL;
3595         case DRM_MODE_REFLECT_Y:
3596         default:
3597                 MISSING_CASE(reflect);
3598         }
3599
3600         return 0;
3601 }
3602
3603 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
3604                   const struct intel_plane_state *plane_state)
3605 {
3606         struct drm_i915_private *dev_priv =
3607                 to_i915(plane_state->base.plane->dev);
3608         const struct drm_framebuffer *fb = plane_state->base.fb;
3609         unsigned int rotation = plane_state->base.rotation;
3610         const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
3611         u32 plane_ctl;
3612
3613         plane_ctl = PLANE_CTL_ENABLE;
3614
3615         if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
3616                 plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
3617                 plane_ctl |=
3618                         PLANE_CTL_PIPE_GAMMA_ENABLE |
3619                         PLANE_CTL_PIPE_CSC_ENABLE |
3620                         PLANE_CTL_PLANE_GAMMA_DISABLE;
3621
3622                 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3623                         plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
3624
3625                 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3626                         plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
3627         }
3628
3629         plane_ctl |= skl_plane_ctl_format(fb->format->format);
3630         plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
3631         plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
3632
3633         if (INTEL_GEN(dev_priv) >= 10)
3634                 plane_ctl |= cnl_plane_ctl_flip(rotation &
3635                                                 DRM_MODE_REFLECT_MASK);
3636
3637         if (key->flags & I915_SET_COLORKEY_DESTINATION)
3638                 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
3639         else if (key->flags & I915_SET_COLORKEY_SOURCE)
3640                 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
3641
3642         return plane_ctl;
3643 }
3644
3645 u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
3646                         const struct intel_plane_state *plane_state)
3647 {
3648         struct drm_i915_private *dev_priv =
3649                 to_i915(plane_state->base.plane->dev);
3650         const struct drm_framebuffer *fb = plane_state->base.fb;
3651         u32 plane_color_ctl = 0;
3652
3653         if (INTEL_GEN(dev_priv) < 11) {
3654                 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
3655                 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
3656         }
3657         plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
3658         plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
3659
3660         if (fb->format->is_yuv) {
3661                 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
3662                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
3663                 else
3664                         plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
3665
3666                 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
3667                         plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
3668         }
3669
3670         return plane_color_ctl;
3671 }
3672
/*
 * Re-read the display hardware state and then commit @state, a
 * previously duplicated atomic state. @state may be NULL, in which
 * case only the hw state readout/VGA redisable is performed.
 * Returns 0 on success or a negative error code; -EDEADLK is not
 * expected here (hence the WARN below).
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev, ctx);
	i915_redisable_vga(to_i915(dev));

	if (!state)
		return 0;

	/*
	 * We've duplicated the state, pointers to the old state are invalid.
	 *
	 * Don't attempt to use the old state until we commit the duplicated state.
	 */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we restore
		 * current state. With fast modeset this may not result
		 * in a modeset when the state is compatible.
		 */
		crtc_state->mode_changed = true;
	}

	/* ignore any reset values/BIOS leftovers in the WM registers */
	if (!HAS_GMCH_DISPLAY(to_i915(dev)))
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);

	WARN_ON(ret == -EDEADLK);
	return ret;
}
3711
3712 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
3713 {
3714         return intel_has_gpu_reset(dev_priv) &&
3715                 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
3716 }
3717
/*
 * Called before a GPU reset. When the reset will clobber the display
 * (or the force_reset_modeset_test modparam asks for it), take all
 * modeset locks, duplicate the current atomic state into
 * dev_priv->modeset_restore_state and disable all crtcs.
 * intel_finish_reset() restores the saved state and drops the locks.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!i915_modparams.force_reset_modeset_test &&
	    !gpu_reset_clobbers_display(dev_priv))
		return;

	/* We have a modeset vs reset deadlock, defensively unbreak it. */
	set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
	wake_up_all(&dev_priv->gpu_error.wait_queue);

	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
		DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
		i915_gem_set_wedged(dev_priv);
	}

	/*
	 * Need mode_config.mutex so that we don't
	 * trample ongoing ->detect() and whatnot.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(ctx, 0);
	/* retry lock acquisition until it no longer deadlocks */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(ctx);
	}
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	state = drm_atomic_helper_duplicate_state(dev, ctx);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		DRM_ERROR("Duplicating state failed with %i\n", ret);
		return;
	}

	ret = drm_atomic_helper_disable_all(dev, ctx);
	if (ret) {
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
		drm_atomic_state_put(state);
		return;
	}

	/* stash the duplicated state for intel_finish_reset() to restore */
	dev_priv->modeset_restore_state = state;
	state->acquire_ctx = ctx;
}
3773
/*
 * Called after a GPU reset. Restores the display state saved by
 * intel_prepare_reset() (doing a full display re-init first if the
 * reset clobbered the display) and drops the locks/flags taken there.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
	struct drm_atomic_state *state;
	int ret;

	/* reset doesn't touch the display */
	if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
		return;

	state = fetch_and_zero(&dev_priv->modeset_restore_state);
	if (!state)
		goto unlock;

	/* reset doesn't touch the display */
	if (!gpu_reset_clobbers_display(dev_priv)) {
		/* for testing only restore the display */
		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);
	} else {
		/*
		 * The display has been reset as well,
		 * so need a full re-initialization.
		 */
		intel_runtime_pm_disable_interrupts(dev_priv);
		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_pps_unlock_regs_wa(dev_priv);
		intel_modeset_init_hw(dev);
		intel_init_clock_gating(dev_priv);

		/* re-arm hotplug interrupts under the irq lock */
		spin_lock_irq(&dev_priv->irq_lock);
		if (dev_priv->display.hpd_irq_setup)
			dev_priv->display.hpd_irq_setup(dev_priv);
		spin_unlock_irq(&dev_priv->irq_lock);

		ret = __intel_display_resume(dev, state, ctx);
		if (ret)
			DRM_ERROR("Restoring old state failed with %i\n", ret);

		intel_hpd_init(dev_priv);
	}

	drm_atomic_state_put(state);
unlock:
	/* release the locks taken in intel_prepare_reset() */
	drm_modeset_drop_locks(ctx);
	drm_modeset_acquire_fini(ctx);
	mutex_unlock(&dev->mode_config.mutex);

	clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
}
3827
/*
 * Update pipe size (PIPESRC) and the panel fitter state for a fastset,
 * i.e. a config change applied without a full modeset.
 */
static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
				     const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = new_crtc_state->base.mode;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((new_crtc_state->pipe_src_w - 1) << 16) |
		   (new_crtc_state->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(crtc);

		if (new_crtc_state->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		/* enable, or disable a previously enabled, PCH panel fitter */
		if (new_crtc_state->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3863
/*
 * intel_fdi_normal_train - take the FDI link out of training mode
 * @crtc: CRTC whose FDI TX/RX should switch to the normal (idle) pattern
 *
 * Programmed after link training has succeeded: switches both the CPU-side
 * FDI transmitter and the PCH-side receiver from the training patterns to
 * normal operation and enables enhanced framing.
 */
static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		/* clear the whole pattern field, then select "none" (normal) */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT uses a different training-pattern field encoding */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3904
/*
 * The FDI link training function for ILK/Ibexpeak.
 *
 * Runs the two-stage FDI training handshake: transmit pattern 1 and poll
 * the RX for bit lock, then transmit pattern 2 and poll for symbol lock.
 * Each stage is polled up to five times; a failure is logged with
 * DRM_ERROR but not reported to the caller.
 */
static void ironlake_fdi_link_train(struct intel_crtc *crtc,
				    const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock (training pattern 1 received). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* write the status bit back to clear it in the IIR */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock (training pattern 2 received). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3998
/*
 * Voltage swing / pre-emphasis combinations tried in order during FDI
 * link training on SNB-B (also reused by the IVB manual trainer).
 * Each value programs the FDI_LINK_TRAIN_VOL_EMP field of FDI_TX_CTL.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
4005
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Two-stage FDI training with voltage-swing/pre-emphasis stepping: for
 * each training pattern, every entry of snb_b_fdi_train_param[] is tried
 * in turn, polling FDI_RX_IIR up to five times per setting for bit lock
 * (pattern 1) or symbol lock (pattern 2). Failures are only logged.
 */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		/* CPT encodes the training pattern in a different field */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Step through the vswing/emphasis table until bit lock appears. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				/* write the bit back to clear the status */
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same vswing/emphasis stepping, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
4138
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Unlike the ILK/SNB trainers, the TX/RX are fully disabled and
 * re-enabled for each attempt, and each vswing/pre-emphasis level from
 * snb_b_fdi_train_param[] is tried twice (j counts attempts, j/2 indexes
 * the table) before moving on. Success jumps straight to train_done.
 */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll for bit lock; re-read once per iteration to catch a
		 * lock that lands between the read and the check. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
4258
/*
 * ironlake_fdi_pll_enable - bring up the FDI PLLs for a pipe
 * @intel_crtc: CRTC whose FDI link is being enabled
 *
 * Enables the PCH-side FDI RX PLL, switches the RX from Rawclk to PCDclk,
 * and finally enables the CPU-side FDI TX PLL (left on if already
 * enabled). Each step is followed by a posting read plus a warmup delay.
 */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* BPC in FDI rx mirrors the PIPECONF BPC field (shifted to 18:16) */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
4295
/*
 * ironlake_fdi_pll_disable - tear down the FDI PLLs for a pipe
 * @intel_crtc: CRTC whose FDI link is being disabled
 *
 * Reverse of ironlake_fdi_pll_enable(): switch the RX back to Rawclk,
 * disable the CPU-side TX PLL, then the PCH-side RX PLL, waiting for
 * the clocks to settle after each disable.
 */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
4325
/*
 * ironlake_fdi_disable - disable the FDI transmitter and receiver
 * @crtc: CRTC whose FDI link is being shut down
 *
 * Disables CPU FDI TX and PCH FDI RX, applies the Ibexpeak clock-pointer
 * workaround, and leaves both sides parked on training pattern 1 so a
 * later re-enable starts from a known state. PLLs are left to
 * ironlake_fdi_pll_disable().
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* refresh the BPC field (bits 18:16) from PIPECONF while disabling */
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
4378
4379 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
4380 {
4381         struct drm_crtc *crtc;
4382         bool cleanup_done;
4383
4384         drm_for_each_crtc(crtc, &dev_priv->drm) {
4385                 struct drm_crtc_commit *commit;
4386                 spin_lock(&crtc->commit_lock);
4387                 commit = list_first_entry_or_null(&crtc->commit_list,
4388                                                   struct drm_crtc_commit, commit_entry);
4389                 cleanup_done = commit ?
4390                         try_wait_for_completion(&commit->cleanup_done) : true;
4391                 spin_unlock(&crtc->commit_lock);
4392
4393                 if (cleanup_done)
4394                         continue;
4395
4396                 drm_crtc_wait_one_vblank(crtc);
4397
4398                 return true;
4399         }
4400
4401         return false;
4402 }
4403
/*
 * lpt_disable_iclkip - gate and disable the LPT iCLKIP clock
 * @dev_priv: i915 device
 *
 * Gates the pixel clock, then sets the SSC disable bit in SBI_SSCCTL6
 * over the sideband interface (serialized by sb_lock).
 */
void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
4418
/* Program iCLKIP clock to the desired frequency */
/*
 * Derives auxdiv/divsel/phaseinc from the CRTC pixel clock, then programs
 * the SSC divider/phase registers over the sideband interface and
 * un-gates the pixel clock. The link is first disabled via
 * lpt_disable_iclkip() so the new settings take effect atomically.
 */
static void lpt_program_iclkip(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		/* integer part of the divisor (biased by 2), and PI phase */
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
4496
/*
 * lpt_get_iclkip - read back the currently programmed iCLKIP frequency
 * @dev_priv: i915 device
 *
 * Inverse of lpt_program_iclkip(): reads divsel/phaseinc/auxdiv from the
 * sideband registers and reconstructs the clock in KHz. Returns 0 when
 * the pixel clock is gated or the SSC modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* undo the "-2" bias and modulo split applied when programming */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
4533
/*
 * ironlake_pch_transcoder_set_timings - mirror CPU timings into the PCH
 * @crtc: CRTC providing the CPU transcoder timings
 * @pch_transcoder: PCH transcoder to program
 *
 * Copies the full set of h/v timing registers from the CPU transcoder to
 * the corresponding PCH transcoder registers so both sides agree.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4557
4558 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4559 {
4560         struct drm_i915_private *dev_priv = to_i915(dev);
4561         uint32_t temp;
4562
4563         temp = I915_READ(SOUTH_CHICKEN1);
4564         if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4565                 return;
4566
4567         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4568         WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4569
4570         temp &= ~FDI_BC_BIFURCATION_SELECT;
4571         if (enable)
4572                 temp |= FDI_BC_BIFURCATION_SELECT;
4573
4574         DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4575         I915_WRITE(SOUTH_CHICKEN1, temp);
4576         POSTING_READ(SOUTH_CHICKEN1);
4577 }
4578
4579 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4580 {
4581         struct drm_device *dev = intel_crtc->base.dev;
4582
4583         switch (intel_crtc->pipe) {
4584         case PIPE_A:
4585                 break;
4586         case PIPE_B:
4587                 if (intel_crtc->config->fdi_lanes > 2)
4588                         cpt_set_fdi_bc_bifurcation(dev, false);
4589                 else
4590                         cpt_set_fdi_bc_bifurcation(dev, true);
4591
4592                 break;
4593         case PIPE_C:
4594                 cpt_set_fdi_bc_bifurcation(dev, true);
4595
4596                 break;
4597         default:
4598                 BUG();
4599         }
4600 }
4601
4602 /*
4603  * Finds the encoder associated with the given CRTC. This can only be
4604  * used when we know that the CRTC isn't feeding multiple encoders!
4605  */
4606 static struct intel_encoder *
4607 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
4608                            const struct intel_crtc_state *crtc_state)
4609 {
4610         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4611         const struct drm_connector_state *connector_state;
4612         const struct drm_connector *connector;
4613         struct intel_encoder *encoder = NULL;
4614         int num_encoders = 0;
4615         int i;
4616
4617         for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
4618                 if (connector_state->crtc != &crtc->base)
4619                         continue;
4620
4621                 encoder = to_intel_encoder(connector_state->best_encoder);
4622                 num_encoders++;
4623         }
4624
4625         WARN(num_encoders != 1, "%d encoders for pipe %c\n",
4626              num_encoders, pipe_name(crtc->pipe));
4627
4628         return encoder;
4629 }
4630
4631 /*
4632  * Enable PCH resources required for PCH ports:
4633  *   - PCH PLLs
4634  *   - FDI training & RX/TX
4635  *   - update transcoder timings
4636  *   - DP transcoding bits
4637  *   - transcoder
4638  */
static void ironlake_pch_enable(const struct intel_atomic_state *state,
                                const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        u32 temp;

        assert_pch_transcoder_disabled(dev_priv, pipe);

        /* IVB can reroute the FDI B/C lanes; refresh the bifurcation
         * setup before the link training below uses them. */
        if (IS_IVYBRIDGE(dev_priv))
                ivybridge_update_fdi_bc_bifurcation(crtc);

        /* Write the TU size bits before fdi link training, so that error
         * detection works. */
        I915_WRITE(FDI_RX_TUSIZE1(pipe),
                   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

        /* For PCH output, training FDI link */
        dev_priv->display.fdi_link_train(crtc, crtc_state);

        /* We need to program the right clock selection before writing the pixel
         * multiplier into the DPLL. */
        if (HAS_PCH_CPT(dev_priv)) {
                u32 sel;

                /* On CPT the transcoder <-> PCH PLL routing is selectable:
                 * point this pipe's transcoder at PLL B only when the crtc
                 * was assigned the PCH PLL B shared DPLL, else PLL A. */
                temp = I915_READ(PCH_DPLL_SEL);
                temp |= TRANS_DPLL_ENABLE(pipe);
                sel = TRANS_DPLLB_SEL(pipe);
                if (crtc_state->shared_dpll ==
                    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
                        temp |= sel;
                else
                        temp &= ~sel;
                I915_WRITE(PCH_DPLL_SEL, temp);
        }

        /* XXX: pch pll's can be enabled any time before we enable the PCH
         * transcoder, and we actually should do this to not upset any PCH
         * transcoder that already use the clock when we share it.
         *
         * Note that enable_shared_dpll tries to do the right thing, but
         * get_shared_dpll unconditionally resets the pll - we need that to have
         * the right LVDS enable sequence. */
        intel_enable_shared_dpll(crtc);

        /* set transcoder timing, panel must allow it */
        assert_panel_unlocked(dev_priv, pipe);
        ironlake_pch_transcoder_set_timings(crtc, pipe);

        intel_fdi_normal_train(crtc);

        /* For PCH DP, enable TRANS_DP_CTL */
        if (HAS_PCH_CPT(dev_priv) &&
            intel_crtc_has_dp_encoder(crtc_state)) {
                const struct drm_display_mode *adjusted_mode =
                        &crtc_state->base.adjusted_mode;
                /* PIPECONF's BPC field starts at bit 5, hence the shift. */
                u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
                i915_reg_t reg = TRANS_DP_CTL(pipe);
                enum port port;

                temp = I915_READ(reg);
                temp &= ~(TRANS_DP_PORT_SEL_MASK |
                          TRANS_DP_SYNC_MASK |
                          TRANS_DP_BPC_MASK);
                temp |= TRANS_DP_OUTPUT_ENABLE;
                temp |= bpc << 9; /* same format but at 11:9 */

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

                /* PCH DP only lives on ports B-D. */
                port = intel_get_crtc_new_encoder(state, crtc_state)->port;
                WARN_ON(port < PORT_B || port > PORT_D);
                temp |= TRANS_DP_PORT_SEL(port);

                I915_WRITE(reg, temp);
        }

        ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4722
static void lpt_pch_enable(const struct intel_atomic_state *state,
                           const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

        /* LPT-style PCHs use only a single PCH transcoder, addressed as
         * PIPE_A here regardless of the CPU pipe. */
        assert_pch_transcoder_disabled(dev_priv, PIPE_A);

        /* Program iCLKIP before enabling the transcoder. */
        lpt_program_iclkip(crtc);

        /* Set transcoder timing. */
        ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

        lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4739
4740 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4741 {
4742         struct drm_i915_private *dev_priv = to_i915(dev);
4743         i915_reg_t dslreg = PIPEDSL(pipe);
4744         u32 temp;
4745
4746         temp = I915_READ(dslreg);
4747         udelay(500);
4748         if (wait_for(I915_READ(dslreg) != temp, 5)) {
4749                 if (wait_for(I915_READ(dslreg) != temp, 5))
4750                         DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4751         }
4752 }
4753
4754 /*
4755  * The hardware phase 0.0 refers to the center of the pixel.
4756  * We want to start from the top/left edge which is phase
4757  * -0.5. That matches how the hardware calculates the scaling
4758  * factors (from top-left of the first pixel to bottom-right
4759  * of the last pixel, as opposed to the pixel centers).
4760  *
4761  * For 4:2:0 subsampled chroma planes we obviously have to
4762  * adjust that so that the chroma sample position lands in
4763  * the right spot.
4764  *
4765  * Note that for packed YCbCr 4:2:2 formats there is no way to
4766  * control chroma siting. The hardware simply replicates the
4767  * chroma samples for both of the luma samples, and thus we don't
4768  * actually get the expected MPEG2 chroma siting convention :(
4769  * The same behaviour is observed on pre-SKL platforms as well.
4770  */
4771 u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
4772 {
4773         int phase = -0x8000;
4774         u16 trip = 0;
4775
4776         if (chroma_cosited)
4777                 phase += (sub - 1) * 0x8000 / sub;
4778
4779         if (phase < 0)
4780                 phase = 0x10000 + phase;
4781         else
4782                 trip = PS_PHASE_TRIP;
4783
4784         return ((phase >> 2) & PS_PHASE_MASK) | trip;
4785 }
4786
/*
 * skl_update_scaler() - stage a scaler request for one scaler user.
 *
 * Decides whether @scaler_user (a plane index, or SKL_CRTC_INDEX for the
 * pipe/panel fitter) needs a scaler for the @src_w x @src_h ->
 * @dst_w x @dst_h conversion, validates the request against the hardware
 * size limits, and records or clears the user in
 * crtc_state->scaler_state.scaler_users. The register update that
 * actually frees or programs a scaler happens later during plane or
 * panel-fitter programming.
 *
 * Returns 0 on success, -EINVAL when the requested scaling cannot be
 * supported.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
                  unsigned int scaler_user, int *scaler_id,
                  int src_w, int src_h, int dst_w, int dst_h,
                  bool plane_scaler_check,
                  uint32_t pixel_format)
{
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(crtc_state->base.crtc);
        struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
        int need_scaling;

        /*
         * Src coordinates are already rotated by 270 degrees for
         * the 90/270 degree plane rotation cases (to match the
         * GTT mapping), hence no need to account for rotation here.
         */
        need_scaling = src_w != dst_w || src_h != dst_h;

        /* NV12 planes always require a scaler, even at 1:1 size. */
        if (plane_scaler_check)
                if (pixel_format == DRM_FORMAT_NV12)
                        need_scaling = true;

        /* YCbCr 4:2:0 output always needs the pipe scaler. */
        if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
                need_scaling = true;

        /*
         * Scaling/fitting not supported in IF-ID mode in GEN9+
         * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
         * Once NV12 is enabled, handle it here while allocating scaler
         * for NV12.
         */
        if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
            need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
                return -EINVAL;
        }

        /*
         * if plane is being disabled or scaler is no more required or force detach
         *  - free scaler binded to this plane/crtc
         *  - in order to do this, update crtc->scaler_usage
         *
         * Here scaler state in crtc_state is set free so that
         * scaler can be assigned to other user. Actual register
         * update to free the scaler is done in plane/panel-fit programming.
         * For this purpose crtc/plane_state->scaler_id isn't reset here.
         */
        if (force_detach || !need_scaling) {
                if (*scaler_id >= 0) {
                        scaler_state->scaler_users &= ~(1 << scaler_user);
                        scaler_state->scalers[*scaler_id].in_use = 0;

                        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                                intel_crtc->pipe, scaler_user, *scaler_id,
                                scaler_state->scaler_users);
                        *scaler_id = -1;
                }
                return 0;
        }

        /* NV12 sources have their own (larger) minimum size. */
        if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
            (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
                DRM_DEBUG_KMS("NV12: src dimensions not met\n");
                return -EINVAL;
        }

        /* range checks */
        if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
            dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
            (IS_GEN11(dev_priv) &&
             (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
              dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
            (!IS_GEN11(dev_priv) &&
             (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
              dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
                DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
                        "size is out of scaler range\n",
                        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
                return -EINVAL;
        }

        /* mark this plane as a scaler user in crtc_state */
        scaler_state->scaler_users |= (1 << scaler_user);
        DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
                scaler_state->scaler_users);

        return 0;
}
4883
4884 /**
4885  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4886  *
4887  * @state: crtc's scaler state
4888  *
4889  * Return
4890  *     0 - scaler_usage updated successfully
4891  *    error - requested scaling cannot be supported or other error condition
4892  */
4893 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4894 {
4895         const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4896
4897         return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4898                                  &state->scaler_state.scaler_id,
4899                                  state->pipe_src_w, state->pipe_src_h,
4900                                  adjusted_mode->crtc_hdisplay,
4901                                  adjusted_mode->crtc_vdisplay, false, 0);
4902 }
4903
4904 /**
4905  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4906  * @crtc_state: crtc's scaler state
4907  * @plane_state: atomic plane state to update
4908  *
4909  * Return
4910  *     0 - scaler_usage updated successfully
4911  *    error - requested scaling cannot be supported or other error condition
4912  */
4913 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4914                                    struct intel_plane_state *plane_state)
4915 {
4916
4917         struct intel_plane *intel_plane =
4918                 to_intel_plane(plane_state->base.plane);
4919         struct drm_framebuffer *fb = plane_state->base.fb;
4920         int ret;
4921
4922         bool force_detach = !fb || !plane_state->base.visible;
4923
4924         ret = skl_update_scaler(crtc_state, force_detach,
4925                                 drm_plane_index(&intel_plane->base),
4926                                 &plane_state->scaler_id,
4927                                 drm_rect_width(&plane_state->base.src) >> 16,
4928                                 drm_rect_height(&plane_state->base.src) >> 16,
4929                                 drm_rect_width(&plane_state->base.dst),
4930                                 drm_rect_height(&plane_state->base.dst),
4931                                 fb ? true : false, fb ? fb->format->format : 0);
4932
4933         if (ret || plane_state->scaler_id < 0)
4934                 return ret;
4935
4936         /* check colorkey */
4937         if (plane_state->ckey.flags) {
4938                 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
4939                               intel_plane->base.base.id,
4940                               intel_plane->base.name);
4941                 return -EINVAL;
4942         }
4943
4944         /* Check src format */
4945         switch (fb->format->format) {
4946         case DRM_FORMAT_RGB565:
4947         case DRM_FORMAT_XBGR8888:
4948         case DRM_FORMAT_XRGB8888:
4949         case DRM_FORMAT_ABGR8888:
4950         case DRM_FORMAT_ARGB8888:
4951         case DRM_FORMAT_XRGB2101010:
4952         case DRM_FORMAT_XBGR2101010:
4953         case DRM_FORMAT_YUYV:
4954         case DRM_FORMAT_YVYU:
4955         case DRM_FORMAT_UYVY:
4956         case DRM_FORMAT_VYUY:
4957         case DRM_FORMAT_NV12:
4958                 break;
4959         default:
4960                 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
4961                               intel_plane->base.base.id, intel_plane->base.name,
4962                               fb->base.id, fb->format->format);
4963                 return -EINVAL;
4964         }
4965
4966         return 0;
4967 }
4968
4969 static void skylake_scaler_disable(struct intel_crtc *crtc)
4970 {
4971         int i;
4972
4973         for (i = 0; i < crtc->num_scalers; i++)
4974                 skl_detach_scaler(crtc, i);
4975 }
4976
/* Program a pipe scaler to implement the panel fitter (pch_pfit) on SKL+. */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = crtc->pipe;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc->config->scaler_state;

        if (crtc->config->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
                int id;

                /* A scaler must have been reserved during atomic check. */
                if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;

                /* Pipe scaling: sub=1, chroma_cosited=false. */
                uv_rgb_hphase = skl_scaler_calc_phase(1, false);
                uv_rgb_vphase = skl_scaler_calc_phase(1, false);

                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
                        PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
                /* NOTE(review): the phase registers use raw I915_WRITE_FW
                 * (no forcewake/lock bookkeeping) while the rest uses
                 * I915_WRITE - confirm this mix is intentional. */
                I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
                I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
                              PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
                I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
                I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
        }
}
5006
5007 static void ironlake_pfit_enable(struct intel_crtc *crtc)
5008 {
5009         struct drm_device *dev = crtc->base.dev;
5010         struct drm_i915_private *dev_priv = to_i915(dev);
5011         int pipe = crtc->pipe;
5012
5013         if (crtc->config->pch_pfit.enabled) {
5014                 /* Force use of hard-coded filter coefficients
5015                  * as some pre-programmed values are broken,
5016                  * e.g. x201.
5017                  */
5018                 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
5019                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5020                                                  PF_PIPE_SEL_IVB(pipe));
5021                 else
5022                         I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
5023                 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
5024                 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
5025         }
5026 }
5027
/* Enable IPS for @crtc_state if the state asks for it. Called from the
 * post-plane-update path, i.e. after a plane is on and a vblank passed. */
void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /*
         * We can only enable IPS after we enable a plane and wait for a vblank
         * This function is called from post_plane_update, which is run after
         * a vblank wait.
         */
        WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

        /* BDW enables IPS via the pcode mailbox; other platforms (HSW)
         * write IPS_CTL directly. */
        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
                                                IPS_ENABLE | IPS_PCODE_CONTROL));
                mutex_unlock(&dev_priv->pcu_lock);
                /* Quoting Art Runyan: "its not safe to expect any particular
                 * value in IPS_CTL bit 31 after enabling IPS through the
                 * mailbox." Moreover, the mailbox may return a bogus state,
                 * so we need to just enable it and continue on.
                 */
        } else {
                I915_WRITE(IPS_CTL, IPS_ENABLE);
                /* The bit only becomes 1 in the next vblank, so this wait here
                 * is essentially intel_wait_for_vblank. If we don't have this
                 * and don't wait for vblanks until the end of crtc_enable, then
                 * the HW state readout code will complain that the expected
                 * IPS_CTL value is not the one we read. */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, IPS_ENABLE,
                                            50))
                        DRM_ERROR("Timed out waiting for IPS enable\n");
        }
}
5067
/* Disable IPS for @crtc_state (no-op if it wasn't enabled). Ends with a
 * vblank wait so a subsequent plane disable is safe. */
void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);

        if (!crtc_state->ips_enabled)
                return;

        /* BDW disables IPS via the pcode mailbox; other platforms (HSW)
         * clear IPS_CTL directly. */
        if (IS_BROADWELL(dev_priv)) {
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
                /* wait for pcode to finish disabling IPS, which may take up to 42ms */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
                                            42))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
                POSTING_READ(IPS_CTL);
        }

        /* We need to wait for a vblank before we can disable the plane. */
        intel_wait_for_vblank(dev_priv, crtc->pipe);
}
5094
5095 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
5096 {
5097         if (intel_crtc->overlay) {
5098                 struct drm_device *dev = intel_crtc->base.dev;
5099
5100                 mutex_lock(&dev->struct_mutex);
5101                 (void) intel_overlay_switch_off(intel_crtc->overlay);
5102                 mutex_unlock(&dev->struct_mutex);
5103         }
5104
5105         /* Let userspace switch the overlay on again. In most cases userspace
5106          * has to recompute where to put it anyway.
5107          */
5108 }
5109
5110 /**
5111  * intel_post_enable_primary - Perform operations after enabling primary plane
5112  * @crtc: the CRTC whose primary plane was just enabled
5113  * @new_crtc_state: the enabling state
5114  *
5115  * Performs potentially sleeping operations that must be done after the primary
5116  * plane is enabled, such as updating FBC and IPS.  Note that this may be
5117  * called due to an explicit primary plane update, or due to an implicit
5118  * re-enable that is caused when a sprite plane is updated to no longer
5119  * completely hide the primary plane.
5120  */
5121 static void
5122 intel_post_enable_primary(struct drm_crtc *crtc,
5123                           const struct intel_crtc_state *new_crtc_state)
5124 {
5125         struct drm_device *dev = crtc->dev;
5126         struct drm_i915_private *dev_priv = to_i915(dev);
5127         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5128         int pipe = intel_crtc->pipe;
5129
5130         /*
5131          * Gen2 reports pipe underruns whenever all planes are disabled.
5132          * So don't enable underrun reporting before at least some planes
5133          * are enabled.
5134          * FIXME: Need to fix the logic to work when we turn off all planes
5135          * but leave the pipe running.
5136          */
5137         if (IS_GEN2(dev_priv))
5138                 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5139
5140         /* Underruns don't always raise interrupts, so check manually. */
5141         intel_check_cpu_fifo_underruns(dev_priv);
5142         intel_check_pch_fifo_underruns(dev_priv);
5143 }
5144
/* FIXME get rid of this and use pre_plane_update */
/* Non-atomic path: preparation that must happen before the primary plane
 * is disabled (underrun masking, IPS off, cxsr off + vblank wait). */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;

        /*
         * Gen2 reports pipe underruns whenever all planes are disabled.
         * So disable underrun reporting before all the planes get disabled.
         */
        if (IS_GEN2(dev_priv))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

        hsw_disable_ips(to_intel_crtc_state(crtc->state));

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev_priv) &&
            intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, pipe);
}
5176
5177 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5178                                        const struct intel_crtc_state *new_crtc_state)
5179 {
5180         if (!old_crtc_state->ips_enabled)
5181                 return false;
5182
5183         if (needs_modeset(&new_crtc_state->base))
5184                 return true;
5185
5186         return !new_crtc_state->ips_enabled;
5187 }
5188
5189 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5190                                        const struct intel_crtc_state *new_crtc_state)
5191 {
5192         if (!new_crtc_state->ips_enabled)
5193                 return false;
5194
5195         if (needs_modeset(&new_crtc_state->base))
5196                 return true;
5197
5198         /*
5199          * We can't read out IPS on broadwell, assume the worst and
5200          * forcibly enable IPS on the first fastset.
5201          */
5202         if (new_crtc_state->update_pipe &&
5203             old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5204                 return true;
5205
5206         return !old_crtc_state->ips_enabled;
5207 }
5208
5209 static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5210                           const struct intel_crtc_state *crtc_state)
5211 {
5212         if (!crtc_state->nv12_planes)
5213                 return false;
5214
5215         if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
5216                 return false;
5217
5218         if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
5219             IS_CANNONLAKE(dev_priv))
5220                 return true;
5221
5222         return false;
5223 }
5224
/*
 * Per-crtc post-commit bookkeeping: flush frontbuffer bits, program the
 * optimal ("post") watermarks, re-enable IPS where allowed, run FBC
 * post-update work, and undo Display WA 827 once no NV12 planes remain.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        /* pipe_config is the *new* crtc state for this commit. */
        struct intel_crtc_state *pipe_config =
                intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
                                                crtc);
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(old_state, primary);

        intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);

        if (pipe_config->update_wm_post && pipe_config->base.active)
                intel_update_watermarks(crtc);

        if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
                hsw_enable_ips(pipe_config);

        if (old_primary_state) {
                struct drm_plane_state *new_primary_state =
                        drm_atomic_get_new_plane_state(old_state, primary);

                intel_fbc_post_update(crtc);

                /* Primary just became visible (or came back via modeset):
                 * run the post-enable work. */
                if (new_primary_state->visible &&
                    (needs_modeset(&pipe_config->base) ||
                     !old_primary_state->visible))
                        intel_post_enable_primary(&crtc->base, pipe_config);
        }

        /* Display WA 827 */
        if (needs_nv12_wa(dev_priv, old_crtc_state) &&
            !needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, false);
                skl_wa_528(dev_priv, crtc->pipe, false);
        }
}
5265
/*
 * Pre-commit counterpart of intel_post_plane_update(): turn off IPS/FBC
 * state that must be down before the plane update, apply Display WA 827
 * when NV12 planes appear, handle the cxsr and LP-watermark vblank
 * waits, and program the intermediate watermarks for non-modeset
 * updates.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
                                   struct intel_crtc_state *pipe_config)
{
        struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_atomic_state *old_state = old_crtc_state->base.state;
        struct drm_plane *primary = crtc->base.primary;
        struct drm_plane_state *old_primary_state =
                drm_atomic_get_old_plane_state(old_state, primary);
        bool modeset = needs_modeset(&pipe_config->base);
        struct intel_atomic_state *old_intel_state =
                to_intel_atomic_state(old_state);

        if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
                hsw_disable_ips(old_crtc_state);

        if (old_primary_state) {
                struct intel_plane_state *new_primary_state =
                        intel_atomic_get_new_plane_state(old_intel_state,
                                                         to_intel_plane(primary));

                intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
                /*
                 * Gen2 reports pipe underruns whenever all planes are disabled.
                 * So disable underrun reporting before all the planes get disabled.
                 */
                if (IS_GEN2(dev_priv) && old_primary_state->visible &&
                    (modeset || !new_primary_state->base.visible))
                        intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
        }

        /* Display WA 827 */
        if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
            needs_nv12_wa(dev_priv, pipe_config)) {
                skl_wa_clkgate(dev_priv, crtc->pipe, true);
                skl_wa_528(dev_priv, crtc->pipe, true);
        }

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
            pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * IVB workaround: must disable low power watermarks for at least
         * one frame before enabling scaling.  LP watermarks can be re-enabled
         * when scaling is disabled.
         *
         * WaCxSRDisabledForSpriteScaling:ivb
         */
        if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
                intel_wait_for_vblank(dev_priv, crtc->pipe);

        /*
         * If we're doing a modeset, we're done.  No need to do any pre-vblank
         * watermark programming here.
         */
        if (needs_modeset(&pipe_config->base))
                return;

        /*
         * For platforms that support atomic watermarks, program the
         * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
         * will be the intermediate values that are safe for both pre- and
         * post- vblank; when vblank happens, the 'active' values will be set
         * to the final 'target' values and we'll do this again to get the
         * optimal watermarks.  For gen9+ platforms, the values we program here
         * will be the final target values which will get automatically latched
         * at vblank time; no further programming will be necessary.
         *
         * If a platform hasn't been transitioned to atomic watermarks yet,
         * we'll continue to update watermarks the old way, if flags tell
         * us to.
         */
        if (dev_priv->display.initial_watermarks != NULL)
                dev_priv->display.initial_watermarks(old_intel_state,
                                                     pipe_config);
        else if (pipe_config->update_wm_pre)
                intel_update_watermarks(crtc);
}
5355
5356 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
5357 {
5358         struct drm_device *dev = crtc->dev;
5359         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5360         struct drm_plane *p;
5361         int pipe = intel_crtc->pipe;
5362
5363         intel_crtc_dpms_overlay_disable(intel_crtc);
5364
5365         drm_for_each_plane_mask(p, dev, plane_mask)
5366                 to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
5367
5368         /*
5369          * FIXME: Once we grow proper nuclear flip support out of this we need
5370          * to compute the mask of flip planes precisely. For the time being
5371          * consider this a flip to a NULL plane.
5372          */
5373         intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
5374 }
5375
5376 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
5377                                           struct intel_crtc_state *crtc_state,
5378                                           struct drm_atomic_state *old_state)
5379 {
5380         struct drm_connector_state *conn_state;
5381         struct drm_connector *conn;
5382         int i;
5383
5384         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5385                 struct intel_encoder *encoder =
5386                         to_intel_encoder(conn_state->best_encoder);
5387
5388                 if (conn_state->crtc != crtc)
5389                         continue;
5390
5391                 if (encoder->pre_pll_enable)
5392                         encoder->pre_pll_enable(encoder, crtc_state, conn_state);
5393         }
5394 }
5395
5396 static void intel_encoders_pre_enable(struct drm_crtc *crtc,
5397                                       struct intel_crtc_state *crtc_state,
5398                                       struct drm_atomic_state *old_state)
5399 {
5400         struct drm_connector_state *conn_state;
5401         struct drm_connector *conn;
5402         int i;
5403
5404         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5405                 struct intel_encoder *encoder =
5406                         to_intel_encoder(conn_state->best_encoder);
5407
5408                 if (conn_state->crtc != crtc)
5409                         continue;
5410
5411                 if (encoder->pre_enable)
5412                         encoder->pre_enable(encoder, crtc_state, conn_state);
5413         }
5414 }
5415
5416 static void intel_encoders_enable(struct drm_crtc *crtc,
5417                                   struct intel_crtc_state *crtc_state,
5418                                   struct drm_atomic_state *old_state)
5419 {
5420         struct drm_connector_state *conn_state;
5421         struct drm_connector *conn;
5422         int i;
5423
5424         for_each_new_connector_in_state(old_state, conn, conn_state, i) {
5425                 struct intel_encoder *encoder =
5426                         to_intel_encoder(conn_state->best_encoder);
5427
5428                 if (conn_state->crtc != crtc)
5429                         continue;
5430
5431                 encoder->enable(encoder, crtc_state, conn_state);
5432                 intel_opregion_notify_encoder(encoder, true);
5433         }
5434 }
5435
5436 static void intel_encoders_disable(struct drm_crtc *crtc,
5437                                    struct intel_crtc_state *old_crtc_state,
5438                                    struct drm_atomic_state *old_state)
5439 {
5440         struct drm_connector_state *old_conn_state;
5441         struct drm_connector *conn;
5442         int i;
5443
5444         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5445                 struct intel_encoder *encoder =
5446                         to_intel_encoder(old_conn_state->best_encoder);
5447
5448                 if (old_conn_state->crtc != crtc)
5449                         continue;
5450
5451                 intel_opregion_notify_encoder(encoder, false);
5452                 encoder->disable(encoder, old_crtc_state, old_conn_state);
5453         }
5454 }
5455
5456 static void intel_encoders_post_disable(struct drm_crtc *crtc,
5457                                         struct intel_crtc_state *old_crtc_state,
5458                                         struct drm_atomic_state *old_state)
5459 {
5460         struct drm_connector_state *old_conn_state;
5461         struct drm_connector *conn;
5462         int i;
5463
5464         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5465                 struct intel_encoder *encoder =
5466                         to_intel_encoder(old_conn_state->best_encoder);
5467
5468                 if (old_conn_state->crtc != crtc)
5469                         continue;
5470
5471                 if (encoder->post_disable)
5472                         encoder->post_disable(encoder, old_crtc_state, old_conn_state);
5473         }
5474 }
5475
5476 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
5477                                             struct intel_crtc_state *old_crtc_state,
5478                                             struct drm_atomic_state *old_state)
5479 {
5480         struct drm_connector_state *old_conn_state;
5481         struct drm_connector *conn;
5482         int i;
5483
5484         for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
5485                 struct intel_encoder *encoder =
5486                         to_intel_encoder(old_conn_state->best_encoder);
5487
5488                 if (old_conn_state->crtc != crtc)
5489                         continue;
5490
5491                 if (encoder->post_pll_disable)
5492                         encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
5493         }
5494 }
5495
/*
 * Enable a CRTC on ILK/SNB/IVB: program pipe state, optionally bring up
 * the FDI link and PCH transcoder, then enable the pipe and encoders.
 * The order of the steps below follows the hardware's required modeset
 * sequence and must not be rearranged.
 */
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
                                 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);

	/* Enabling an already-active pipe indicates broken state tracking. */
	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	/* A PCH encoder needs the shared DPLL prepared before it's used. */
	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	/* FDI link M/N values are programmed via the CPU transcoder. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		/* Without a PCH encoder, FDI must be completely off. */
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	/* Program safe watermarks before the pipe starts fetching data. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
	intel_enable_pipe(pipe_config);

	/* The PCH transcoder is brought up after the CPU pipe is running. */
	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(old_intel_state, pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	if (HAS_PCH_CPT(dev_priv))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev_priv, pipe);
		intel_wait_for_vblank(dev_priv, pipe);
	}
	/* Everything is up; treat underruns as real errors again. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5589
5590 /* IPS only exists on ULT machines and is tied to pipe A. */
5591 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
5592 {
5593         return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
5594 }
5595
5596 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
5597                                             enum pipe pipe, bool apply)
5598 {
5599         u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
5600         u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
5601
5602         if (apply)
5603                 val |= mask;
5604         else
5605                 val &= ~mask;
5606
5607         I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
5608 }
5609
5610 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
5611 {
5612         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5613         enum pipe pipe = crtc->pipe;
5614         uint32_t val;
5615
5616         val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
5617
5618         /* Program B credit equally to all pipes */
5619         val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
5620
5621         I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
5622 }
5623
/*
 * Enable a CRTC on HSW+ (DDI platforms): shared DPLL, pipe timings,
 * pipeconf/pipemisc/CSC, pfit, LUT, DDI transcoder, watermarks, then the
 * pipe itself and finally the encoders. Steps that DSI handles in its
 * own code are skipped for DSI transcoders. The ordering follows the
 * hardware's required modeset sequence and must not be rearranged.
 */
static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
                                struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	bool psl_clkgate_wa;
	u32 pipe_chicken;

	/* Enabling an already-active pipe indicates broken state tracking. */
	if (WARN_ON(intel_crtc->active))
		return;

	intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	/* ICL+ maps the enabled PLLs to their DDI ports at this point. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_map_plls_to_ports(crtc, pipe_config, old_state);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	/* DSI programs its own transcoder timings. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	/* The pixel multiplier register exists on non-eDP, non-DSI only. */
	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	/* FDI link M/N values are programmed via the CPU transcoder. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!transcoder_is_dsi(cpu_transcoder))
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
	psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
			 intel_crtc->config->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	/*
	 * Display WA #1153: enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	if (INTEL_GEN(dev_priv) >= 11) {
		pipe_chicken = I915_READ(PIPE_CHICKEN(pipe));
		if (!(pipe_chicken & PER_PIXEL_ALPHA_BYPASS_EN))
			I915_WRITE_FW(PIPE_CHICKEN(pipe),
				      pipe_chicken | PER_PIXEL_ALPHA_BYPASS_EN);
	}

	intel_ddi_set_pipe_settings(pipe_config);
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_enable_transcoder_func(pipe_config);

	/* Program safe watermarks before the pipe starts fetching data. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state, pipe_config);

	if (INTEL_GEN(dev_priv) >= 11)
		icl_pipe_mbus_enable(intel_crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_enable_pipe(pipe_config);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(old_intel_state, pipe_config);

	/* Allocate the MST virtual channel payload once the pipe is up. */
	if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(pipe_config, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);

	/* The WA #1180 gating disable only needs to cover the first frame. */
	if (psl_clkgate_wa) {
		intel_wait_for_vblank(dev_priv, pipe);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
	}
}
5744
5745 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5746 {
5747         struct drm_device *dev = crtc->base.dev;
5748         struct drm_i915_private *dev_priv = to_i915(dev);
5749         int pipe = crtc->pipe;
5750
5751         /* To avoid upsetting the power well on haswell only disable the pfit if
5752          * it's in use. The hw state code will make sure we get this right. */
5753         if (force || crtc->config->pch_pfit.enabled) {
5754                 I915_WRITE(PF_CTL(pipe), 0);
5755                 I915_WRITE(PF_WIN_POS(pipe), 0);
5756                 I915_WRITE(PF_WIN_SZ(pipe), 0);
5757         }
5758 }
5759
/*
 * Disable a CRTC on ILK/SNB/IVB: encoders first, then the pipe, pfit,
 * FDI link and — for PCH outputs — the PCH transcoder and FDI PLL. The
 * teardown order mirrors ironlake_crtc_enable() and must not be
 * rearranged.
 */
static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                  struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		/* CPT routes the transcoder via TRANS_DP_CTL/DPLL_SEL. */
		if (HAS_PCH_CPT(dev_priv)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Everything is off; re-arm underrun reporting. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
5818
/*
 * Disable a CRTC on HSW+ (DDI platforms). Inverse of
 * haswell_crtc_enable(); pipe/transcoder steps handled by the DSI code
 * are skipped for DSI transcoders.
 */
static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
                                 struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!transcoder_is_dsi(cpu_transcoder))
		intel_disable_pipe(old_crtc_state);

	/* Release the MST virtual channel payload before the transcoder. */
	if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

	if (!transcoder_is_dsi(cpu_transcoder))
		intel_ddi_disable_transcoder_func(old_crtc_state);

	if (INTEL_GEN(dev_priv) >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* Undo the PLL-to-port mapping done during enable on ICL+. */
	if (INTEL_GEN(dev_priv) >= 11)
		icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
}
5852
5853 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5854 {
5855         struct drm_device *dev = crtc->base.dev;
5856         struct drm_i915_private *dev_priv = to_i915(dev);
5857         struct intel_crtc_state *pipe_config = crtc->config;
5858
5859         if (!pipe_config->gmch_pfit.control)
5860                 return;
5861
5862         /*
5863          * The panel fitter should only be adjusted whilst the pipe is disabled,
5864          * according to register description and PRM.
5865          */
5866         WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5867         assert_pipe_disabled(dev_priv, crtc->pipe);
5868
5869         I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5870         I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5871
5872         /* Border color in case we don't scale up to the full screen. Black by
5873          * default, change to something else for debugging. */
5874         I915_WRITE(BCLRPAT(crtc->pipe), 0);
5875 }
5876
5877 bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
5878 {
5879         if (IS_ICELAKE(dev_priv))
5880                 return port >= PORT_C && port <= PORT_F;
5881
5882         return false;
5883 }
5884
5885 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
5886 {
5887         if (!intel_port_is_tc(dev_priv, port))
5888                 return PORT_TC_NONE;
5889
5890         return port - PORT_C;
5891 }
5892
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are flagged via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5913
5914 static u64 get_crtc_power_domains(struct drm_crtc *crtc,
5915                                   struct intel_crtc_state *crtc_state)
5916 {
5917         struct drm_device *dev = crtc->dev;
5918         struct drm_i915_private *dev_priv = to_i915(dev);
5919         struct drm_encoder *encoder;
5920         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5921         enum pipe pipe = intel_crtc->pipe;
5922         u64 mask;
5923         enum transcoder transcoder = crtc_state->cpu_transcoder;
5924
5925         if (!crtc_state->base.active)
5926                 return 0;
5927
5928         mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
5929         mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
5930         if (crtc_state->pch_pfit.enabled ||
5931             crtc_state->pch_pfit.force_thru)
5932                 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5933
5934         drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5935                 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5936
5937                 mask |= BIT_ULL(intel_encoder->power_domain);
5938         }
5939
5940         if (HAS_DDI(dev_priv) && crtc_state->has_audio)
5941                 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
5942
5943         if (crtc_state->shared_dpll)
5944                 mask |= BIT_ULL(POWER_DOMAIN_PLLS);
5945
5946         return mask;
5947 }
5948
5949 static u64
5950 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5951                                struct intel_crtc_state *crtc_state)
5952 {
5953         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5954         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5955         enum intel_display_power_domain domain;
5956         u64 domains, new_domains, old_domains;
5957
5958         old_domains = intel_crtc->enabled_power_domains;
5959         intel_crtc->enabled_power_domains = new_domains =
5960                 get_crtc_power_domains(crtc, crtc_state);
5961
5962         domains = new_domains & ~old_domains;
5963
5964         for_each_power_domain(domain, domains)
5965                 intel_display_power_get(dev_priv, domain);
5966
5967         return old_domains & ~new_domains;
5968 }
5969
5970 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5971                                       u64 domains)
5972 {
5973         enum intel_display_power_domain domain;
5974
5975         for_each_power_domain(domain, domains)
5976                 intel_display_power_put(dev_priv, domain);
5977 }
5978
5979 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
5980                                    struct drm_atomic_state *old_state)
5981 {
5982         struct intel_atomic_state *old_intel_state =
5983                 to_intel_atomic_state(old_state);
5984         struct drm_crtc *crtc = pipe_config->base.crtc;
5985         struct drm_device *dev = crtc->dev;
5986         struct drm_i915_private *dev_priv = to_i915(dev);
5987         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5988         int pipe = intel_crtc->pipe;
5989
5990         if (WARN_ON(intel_crtc->active))
5991                 return;
5992
5993         if (intel_crtc_has_dp_encoder(intel_crtc->config))
5994                 intel_dp_set_m_n(intel_crtc, M1_N1);
5995
5996         intel_set_pipe_timings(intel_crtc);
5997         intel_set_pipe_src_size(intel_crtc);
5998
5999         if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
6000                 struct drm_i915_private *dev_priv = to_i915(dev);
6001
6002                 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6003                 I915_WRITE(CHV_CANVAS(pipe), 0);
6004         }
6005
6006         i9xx_set_pipeconf(intel_crtc);
6007
6008         intel_crtc->active = true;
6009
6010         intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6011
6012         intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
6013
6014         if (IS_CHERRYVIEW(dev_priv)) {
6015                 chv_prepare_pll(intel_crtc, intel_crtc->config);
6016                 chv_enable_pll(intel_crtc, intel_crtc->config);
6017         } else {
6018                 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6019                 vlv_enable_pll(intel_crtc, intel_crtc->config);
6020         }
6021
6022         intel_encoders_pre_enable(crtc, pipe_config, old_state);
6023
6024         i9xx_pfit_enable(intel_crtc);
6025
6026         intel_color_load_luts(&pipe_config->base);
6027
6028         dev_priv->display.initial_watermarks(old_intel_state,
6029                                              pipe_config);
6030         intel_enable_pipe(pipe_config);
6031
6032         assert_vblank_disabled(crtc);
6033         drm_crtc_vblank_on(crtc);
6034
6035         intel_encoders_enable(crtc, pipe_config, old_state);
6036 }
6037
6038 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6039 {
6040         struct drm_device *dev = crtc->base.dev;
6041         struct drm_i915_private *dev_priv = to_i915(dev);
6042
6043         I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6044         I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6045 }
6046
/*
 * Enable a CRTC on gen2-4 (non-VLV/CHV) platforms: PLL dividers, pipe
 * timings and pipeconf, then the DPLL, pfit, LUT, watermarks, pipe and
 * encoders. The ordering follows the hardware's required modeset
 * sequence and must not be rearranged.
 */
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
                             struct drm_atomic_state *old_state)
{
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	struct drm_crtc *crtc = pipe_config->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;

	/* Enabling an already-active pipe indicates broken state tracking. */
	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* Skipped on gen2 — matches the gen2 check in i9xx_crtc_disable(). */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(crtc, pipe_config, old_state);

	i9xx_enable_pll(intel_crtc, pipe_config);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	/* Atomic watermarks where wired up; legacy update path otherwise. */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(old_intel_state,
						     intel_crtc->config);
	else
		intel_update_watermarks(intel_crtc);
	intel_enable_pipe(pipe_config);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	intel_encoders_enable(crtc, pipe_config, old_state);
}
6096
6097 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6098 {
6099         struct drm_device *dev = crtc->base.dev;
6100         struct drm_i915_private *dev_priv = to_i915(dev);
6101
6102         if (!crtc->config->gmch_pfit.control)
6103                 return;
6104
6105         assert_pipe_disabled(dev_priv, crtc->pipe);
6106
6107         DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6108                          I915_READ(PFIT_CONTROL));
6109         I915_WRITE(PFIT_CONTROL, 0);
6110 }
6111
/*
 * Disable a CRTC on gen2-4 and VLV/CHV: encoders, pipe, pfit, then the
 * DPLL (unless a DSI encoder is attached). The teardown order mirrors
 * the corresponding enable path and must not be rearranged.
 */
static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
                              struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc = old_crtc_state->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev_priv))
		intel_wait_for_vblank(dev_priv, pipe);

	intel_encoders_disable(crtc, old_crtc_state, old_state);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(old_crtc_state);

	i9xx_pfit_disable(intel_crtc);

	intel_encoders_post_disable(crtc, old_crtc_state, old_state);

	/* Leave the DPLL running when a DSI encoder is attached. */
	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);

	/* Underrun reporting was never enabled on gen2; see enable path. */
	if (!IS_GEN2(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* Legacy watermark update for platforms without atomic watermarks. */
	if (!dev_priv->display.initial_watermarks)
		intel_update_watermarks(intel_crtc);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
6160
/*
 * Force a CRTC off outside of a normal atomic commit: disable its planes,
 * run the platform crtc_disable hook via a throwaway atomic state, then
 * adjust the software state tracking and drop the CRTC's power references.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
					struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	struct intel_plane *plane;
	u64 domains;
	struct drm_atomic_state *state;
	struct intel_crtc_state *crtc_state;
	int ret;

	/* Nothing to do for a CRTC that is already off. */
	if (!intel_crtc->active)
		return;

	/* Turn off any still-visible plane before taking down the pipe. */
	for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
		const struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->base.visible)
			intel_plane_disable_noatomic(intel_crtc, plane);
	}

	/* Build a minimal atomic state just to drive the crtc_disable hook. */
	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	/* Adjust the software state to match: CRTC off, nothing attached. */
	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	/* Detach every encoder that was routed to this CRTC. */
	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Drop the power domain references the enabled pipe was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	/* This pipe no longer contributes to cdclk/voltage requirements. */
	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_cdclk[intel_crtc->pipe] = 0;
	dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
}
6230
6231 /*
6232  * turn all crtc's off, but do not adjust state
6233  * This has to be paired with a call to intel_modeset_setup_hw_state.
6234  */
6235 int intel_display_suspend(struct drm_device *dev)
6236 {
6237         struct drm_i915_private *dev_priv = to_i915(dev);
6238         struct drm_atomic_state *state;
6239         int ret;
6240
6241         state = drm_atomic_helper_suspend(dev);
6242         ret = PTR_ERR_OR_ZERO(state);
6243         if (ret)
6244                 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6245         else
6246                 dev_priv->modeset_restore_state = state;
6247         return ret;
6248 }
6249
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Tear down the base encoder, then free the containing intel_encoder. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
6257
6258 /* Cross check the actual hw state with our own modeset state tracking (and it's
6259  * internal consistency). */
static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state)
{
	struct intel_connector *connector = to_intel_connector(conn_state->connector);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;

		/* A connector that is on in hardware must have a crtc in software. */
		I915_STATE_WARN(!crtc_state,
			 "connector enabled without attached crtc\n");

		if (!crtc_state)
			return;

		I915_STATE_WARN(!crtc_state->active,
		      "connector is active, but attached crtc isn't\n");

		/* NOTE(review): DP MST connectors are skipped here — presumably
		 * their encoder linkage is verified elsewhere; confirm. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hardware-off connector: software must agree nothing is attached/active. */
		I915_STATE_WARN(crtc_state && crtc_state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
			"best encoder set without crtc!\n");
	}
}
6296
6297 int intel_connector_init(struct intel_connector *connector)
6298 {
6299         struct intel_digital_connector_state *conn_state;
6300
6301         /*
6302          * Allocate enough memory to hold intel_digital_connector_state,
6303          * This might be a few bytes too many, but for connectors that don't
6304          * need it we'll free the state and allocate a smaller one on the first
6305          * succesful commit anyway.
6306          */
6307         conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
6308         if (!conn_state)
6309                 return -ENOMEM;
6310
6311         __drm_atomic_helper_connector_reset(&connector->base,
6312                                             &conn_state->base);
6313
6314         return 0;
6315 }
6316
6317 struct intel_connector *intel_connector_alloc(void)
6318 {
6319         struct intel_connector *connector;
6320
6321         connector = kzalloc(sizeof *connector, GFP_KERNEL);
6322         if (!connector)
6323                 return NULL;
6324
6325         if (intel_connector_init(connector) < 0) {
6326                 kfree(connector);
6327                 return NULL;
6328         }
6329
6330         return connector;
6331 }
6332
6333 /*
6334  * Free the bits allocated by intel_connector_alloc.
6335  * This should only be used after intel_connector_alloc has returned
6336  * successfully, and before drm_connector_init returns successfully.
6337  * Otherwise the destroy callbacks for the connector and the state should
6338  * take care of proper cleanup/free
6339  */
6340 void intel_connector_free(struct intel_connector *connector)
6341 {
6342         kfree(to_intel_digital_connector_state(connector->base.state));
6343         kfree(connector);
6344 }
6345
6346 /* Simple connector->get_hw_state implementation for encoders that support only
6347  * one connector and no cloning and hence the encoder state determines the state
6348  * of the connector. */
6349 bool intel_connector_get_hw_state(struct intel_connector *connector)
6350 {
6351         enum pipe pipe = 0;
6352         struct intel_encoder *encoder = connector->encoder;
6353
6354         return encoder->get_hw_state(encoder, &pipe);
6355 }
6356
6357 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6358 {
6359         if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6360                 return crtc_state->fdi_lanes;
6361
6362         return 0;
6363 }
6364
/*
 * Validate the FDI lane count requested for @pipe, including the lane
 * sharing constraints between pipes B and C on three-pipe (IVB) parts.
 *
 * Returns 0 when the config is possible, -EINVAL when it is not, or the
 * error from acquiring the other pipe's crtc state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	/* 4 lanes is the hard maximum everywhere. */
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW cap at 2 lanes and have no inter-pipe sharing constraints. */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts: no lane sharing between pipes to worry about. */
	if (INTEL_INFO(dev_priv)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on pipe B requires pipe C to use no FDI lanes at all. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		/* Pipe C itself can never use more than 2 lanes... */
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* ...and only when pipe B also stays at 2 lanes or fewer. */
		other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}
6436
6437 #define RETRY 1
/*
 * Compute the FDI lane count and M/N values for @pipe_config, reducing
 * pipe bpp step by step when the link cannot carry the requested config.
 *
 * Returns 0 on success, RETRY (1) when bpp was reduced and the caller
 * must recompute the config, or a negative error code.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false);

	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
	/* Drop 2 bits per component and retry, down to a 6bpc (6*3) floor. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* Signal the caller that the config must be recomputed with the new bpp. */
	if (needs_recompute)
		return RETRY;

	return ret;
}
6482
/* Can the pipe described by @crtc_state use IPS at all? */
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	/* Respect the module parameter override. */
	if (!i915_modparams.enable_ips)
		return false;

	/* NOTE(review): apparently IPS can't cope with >24bpp — confirm. */
	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS would be
	 * a net win — for now only BDW is limited this way.
	 */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
		return false;

	return true;
}
6511
/* Decide whether IPS should actually be enabled for this crtc state. */
static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(crtc_state->base.state);

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return false;

	/* NOTE(review): ips_force_disable looks like an explicit override
	 * flag — its origin is not visible here; confirm who sets it. */
	if (crtc_state->ips_force_disable)
		return false;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return false;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) &&
	    crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
		return false;

	return true;
}
6536
6537 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6538 {
6539         const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6540
6541         /* GDG double wide on either pipe, otherwise pipe A only */
6542         return INTEL_GEN(dev_priv) < 4 &&
6543                 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6544 }
6545
/*
 * Pixel rate the pipe has to sustain, scaled up for pch panel fitter
 * downscaling (the clamping keeps the scale factor >= 1, so upscaling
 * never reduces the reported rate).
 */
static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		/* pfit.size packs width in the high 16 bits, height in the low 16. */
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		/* Clamp so pipe_w/pfit_w and pipe_h/pfit_h never drop below 1. */
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		/* A zero pfit dimension would divide by zero below. */
		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		/* 64-bit math: rate * w * h can overflow 32 bits. */
		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
6580
6581 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
6582 {
6583         struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
6584
6585         if (HAS_GMCH_DISPLAY(dev_priv))
6586                 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
6587                 crtc_state->pixel_rate =
6588                         crtc_state->base.adjusted_mode.crtc_clock;
6589         else
6590                 crtc_state->pixel_rate =
6591                         ilk_pipe_pixel_rate(crtc_state);
6592 }
6593
/*
 * Validate and finish computing the pipe config: clock limits, double
 * wide, YCBCR420/CTM exclusivity, even-width constraints, pixel rate and
 * (for PCH encoders) the FDI config.
 *
 * Returns 0 on success, -EINVAL for an impossible config, or the result
 * of ironlake_fdi_compute_config() (which may be RETRY).
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int clock_limit = dev_priv->max_dotclk_freq;

	if (INTEL_GEN(dev_priv) < 4) {
		/* Pre-gen4: single wide is limited to 90% of cdclk. */
		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit = dev_priv->max_dotclk_freq;
			pipe_config->double_wide = true;
		}
	}

	if (adjusted_mode->crtc_clock > clock_limit) {
		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			      adjusted_mode->crtc_clock, clock_limit,
			      yesno(pipe_config->double_wide));
		return -EINVAL;
	}

	if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
		/*
		 * There is only one pipe CSC unit per pipe, and we need that
		 * for output conversion from RGB->YCBCR. So if CTM is already
		 * applied we can't support YCBCR420 output.
		 */
		DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
		return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (pipe_config->pipe_src_w & 1) {
		if (pipe_config->double_wide) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
			return -EINVAL;
		}

		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(dev)) {
			DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
			return -EINVAL;
		}
	}

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	intel_crtc_compute_pixel_rate(pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}
6666
6667 static void
6668 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
6669 {
6670         while (*num > DATA_LINK_M_N_MASK ||
6671                *den > DATA_LINK_M_N_MASK) {
6672                 *num >>= 1;
6673                 *den >>= 1;
6674         }
6675 }
6676
6677 static void compute_m_n(unsigned int m, unsigned int n,
6678                         uint32_t *ret_m, uint32_t *ret_n,
6679                         bool reduce_m_n)
6680 {
6681         /*
6682          * Reduce M/N as much as possible without loss in precision. Several DP
6683          * dongles in particular seem to be fussy about too large *link* M/N
6684          * values. The passed in values are more likely to have the least
6685          * significant bits zero than M after rounding below, so do this first.
6686          */
6687         if (reduce_m_n) {
6688                 while ((m & 1) == 0 && (n & 1) == 0) {
6689                         m >>= 1;
6690                         n >>= 1;
6691                 }
6692         }
6693
6694         *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6695         *ret_m = div_u64((uint64_t) m * *ret_n, n);
6696         intel_reduce_m_n_ratio(ret_m, ret_n);
6697 }
6698
/*
 * Fill @m_n with the data (gmch) and link M/N values for this link:
 * data M/N compares the pixel stream bandwidth (bits_per_pixel *
 * pixel_clock) against the total link bandwidth (link_clock * nlanes * 8),
 * link M/N compares pixel_clock against link_clock.
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n,
		       bool reduce_m_n)
{
	/* NOTE(review): tu is hard-coded to 64 — presumably the transfer
	 * unit size; confirm against the DP spec. */
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n,
		    reduce_m_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n,
		    reduce_m_n);
}
6716
6717 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6718 {
6719         if (i915_modparams.panel_use_ssc >= 0)
6720                 return i915_modparams.panel_use_ssc != 0;
6721         return dev_priv->vbt.lvds_use_ssc
6722                 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6723 }
6724
6725 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
6726 {
6727         return (1 << dpll->n) << 16 | dpll->m2;
6728 }
6729
6730 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6731 {
6732         return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
6733 }
6734
6735 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6736                                      struct intel_crtc_state *crtc_state,
6737                                      struct dpll *reduced_clock)
6738 {
6739         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6740         u32 fp, fp2 = 0;
6741
6742         if (IS_PINEVIEW(dev_priv)) {
6743                 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
6744                 if (reduced_clock)
6745                         fp2 = pnv_dpll_compute_fp(reduced_clock);
6746         } else {
6747                 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
6748                 if (reduced_clock)
6749                         fp2 = i9xx_dpll_compute_fp(reduced_clock);
6750         }
6751
6752         crtc_state->dpll_hw_state.fp0 = fp;
6753
6754         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
6755             reduced_clock) {
6756                 crtc_state->dpll_hw_state.fp1 = fp2;
6757         } else {
6758                 crtc_state->dpll_hw_state.fp1 = fp;
6759         }
6760 }
6761
/* Recalibrate the PLL B opamp via the DPIO sideband. */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	/* NOTE(review): the REF_DW13 values (0x8c..., later 0xb0...) are
	 * opaque magic — not derivable from the code here. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	/* Clear the forced opamp value again once calibration is done. */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
6790
/* Program the PCH transcoder data/link M1/N1 registers for @crtc's pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	/* TU size shares the DATA_M register with the gmch M value. */
	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
6803
/*
 * Program the CPU transcoder M/N registers; @m2_n2 optionally supplies
 * the second set used for DRRS where M2/N2 registers exist.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_GEN(dev_priv) >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
		    INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		/* Pre-gen5 (g4x) uses per-pipe rather than per-transcoder registers. */
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}
6836
/* Program the DP M/N set selected by @m_n into the hardware for @crtc. */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n regardless of the
	 * selected @m_n — confirm M2_N2 is never requested for PCH encoders.
	 */
	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}
6861
/* Compute the VLV DPLL control register values into dpll_hw_state. */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* NOTE(review): the CRI clock bit is only set for pipes other than A —
	 * reason not visible here; confirm against BSpec. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
			DPLL_EXT_BUFFER_ENABLE_VLV;

	/* Pixel multiplier is programmed as (multiplier - 1) in DPLL_MD. */
	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
6878
/* Compute the CHV DPLL control register values into dpll_hw_state. */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* NOTE(review): CRI clock bit only on pipes other than A, as on VLV. */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	/* DPLL not used with DSI, but still need the rest set up */
	if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;

	/* Pixel multiplier is programmed as (multiplier - 1) in DPLL_MD. */
	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
6894
6895 static void vlv_prepare_pll(struct intel_crtc *crtc,
6896                             const struct intel_crtc_state *pipe_config)
6897 {
6898         struct drm_device *dev = crtc->base.dev;
6899         struct drm_i915_private *dev_priv = to_i915(dev);
6900         enum pipe pipe = crtc->pipe;
6901         u32 mdiv;
6902         u32 bestn, bestm1, bestm2, bestp1, bestp2;
6903         u32 coreclk, reg_val;
6904
6905         /* Enable Refclk */
6906         I915_WRITE(DPLL(pipe),
6907                    pipe_config->dpll_hw_state.dpll &
6908                    ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
6909
6910         /* No need to actually set up the DPLL with DSI */
6911         if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
6912                 return;
6913
6914         mutex_lock(&dev_priv->sb_lock);
6915
6916         bestn = pipe_config->dpll.n;
6917         bestm1 = pipe_config->dpll.m1;
6918         bestm2 = pipe_config->dpll.m2;
6919         bestp1 = pipe_config->dpll.p1;
6920         bestp2 = pipe_config->dpll.p2;
6921
6922         /* See eDP HDMI DPIO driver vbios notes doc */
6923
6924         /* PLL B needs special handling */
6925         if (pipe == PIPE_B)
6926                 vlv_pllb_recal_opamp(dev_priv, pipe);
6927
6928         /* Set up Tx target for periodic Rcomp update */
6929         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
6930
6931         /* Disable target IRef on PLL */
6932         reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
6933         reg_val &= 0x00ffffff;
6934         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
6935
6936         /* Disable fast lock */
6937         vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
6938
6939         /* Set idtafcrecal before PLL is enabled */
6940         mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
6941         mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
6942         mdiv |= ((bestn << DPIO_N_SHIFT));
6943         mdiv |= (1 << DPIO_K_SHIFT);
6944
6945         /*
6946          * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
6947          * but we don't support that).
6948          * Note: don't use the DAC post divider as it seems unstable.
6949          */
6950         mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
6951         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
6952
6953         mdiv |= DPIO_ENABLE_CALIBRATION;
6954         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
6955
6956         /* Set HBR and RBR LPF coefficients */
6957         if (pipe_config->port_clock == 162000 ||
6958             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
6959             intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
6960                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
6961                                  0x009f0003);
6962         else
6963                 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
6964                                  0x00d0000f);
6965
6966         if (intel_crtc_has_dp_encoder(pipe_config)) {
6967                 /* Use SSC source */
6968                 if (pipe == PIPE_A)
6969                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6970                                          0x0df40000);
6971                 else
6972                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6973                                          0x0df70000);
6974         } else { /* HDMI or VGA */
6975                 /* Use bend source */
6976                 if (pipe == PIPE_A)
6977                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6978                                          0x0df70000);
6979                 else
6980                         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
6981                                          0x0df40000);
6982         }
6983
6984         coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
6985         coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
6986         if (intel_crtc_has_dp_encoder(crtc->config))
6987                 coreclk |= 0x01000000;
6988         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
6989
6990         vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
6991         mutex_unlock(&dev_priv->sb_lock);
6992 }
6993
/*
 * Program the CHV PHY PLL dividers, loop filter and lock-detect
 * settings over the sideband (DPIO) interface, before the PLL is
 * enabled. For DSI only the refclk/SSC bits in DPLL are written and
 * the DPIO programming is skipped entirely.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* Enable Refclk and SSC */
	I915_WRITE(DPLL(pipe),
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* No need to actually set up the DPLL with DSI */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	/* m2 is stored with a 22-bit fractional part; split it here */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	/* coarse threshold selection only when there is no fractional part */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	/* Coefficients and tribuf target are picked by target VCO frequency */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7098
7099 /**
7100  * vlv_force_pll_on - forcibly enable just the PLL
7101  * @dev_priv: i915 private structure
7102  * @pipe: pipe PLL to enable
7103  * @dpll: PLL configuration
7104  *
7105  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7106  * in cases where we need the PLL enabled even when @pipe is not going to
7107  * be enabled.
7108  */
7109 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
7110                      const struct dpll *dpll)
7111 {
7112         struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
7113         struct intel_crtc_state *pipe_config;
7114
7115         pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7116         if (!pipe_config)
7117                 return -ENOMEM;
7118
7119         pipe_config->base.crtc = &crtc->base;
7120         pipe_config->pixel_multiplier = 1;
7121         pipe_config->dpll = *dpll;
7122
7123         if (IS_CHERRYVIEW(dev_priv)) {
7124                 chv_compute_dpll(crtc, pipe_config);
7125                 chv_prepare_pll(crtc, pipe_config);
7126                 chv_enable_pll(crtc, pipe_config);
7127         } else {
7128                 vlv_compute_dpll(crtc, pipe_config);
7129                 vlv_prepare_pll(crtc, pipe_config);
7130                 vlv_enable_pll(crtc, pipe_config);
7131         }
7132
7133         kfree(pipe_config);
7134
7135         return 0;
7136 }
7137
7138 /**
7139  * vlv_force_pll_off - forcibly disable just the PLL
7140  * @dev_priv: i915 private structure
7141  * @pipe: pipe PLL to disable
7142  *
 * Disable the PLL for @pipe. To be used in cases where the PLL was
 * force-enabled via vlv_force_pll_on() while @pipe itself is not enabled.
7145  */
7146 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
7147 {
7148         if (IS_CHERRYVIEW(dev_priv))
7149                 chv_disable_pll(dev_priv, pipe);
7150         else
7151                 vlv_disable_pll(dev_priv, pipe);
7152 }
7153
7154 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7155                               struct intel_crtc_state *crtc_state,
7156                               struct dpll *reduced_clock)
7157 {
7158         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7159         u32 dpll;
7160         struct dpll *clock = &crtc_state->dpll;
7161
7162         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7163
7164         dpll = DPLL_VGA_MODE_DIS;
7165
7166         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7167                 dpll |= DPLLB_MODE_LVDS;
7168         else
7169                 dpll |= DPLLB_MODE_DAC_SERIAL;
7170
7171         if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7172             IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
7173                 dpll |= (crtc_state->pixel_multiplier - 1)
7174                         << SDVO_MULTIPLIER_SHIFT_HIRES;
7175         }
7176
7177         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7178             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7179                 dpll |= DPLL_SDVO_HIGH_SPEED;
7180
7181         if (intel_crtc_has_dp_encoder(crtc_state))
7182                 dpll |= DPLL_SDVO_HIGH_SPEED;
7183
7184         /* compute bitmask from p1 value */
7185         if (IS_PINEVIEW(dev_priv))
7186                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7187         else {
7188                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7189                 if (IS_G4X(dev_priv) && reduced_clock)
7190                         dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7191         }
7192         switch (clock->p2) {
7193         case 5:
7194                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7195                 break;
7196         case 7:
7197                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7198                 break;
7199         case 10:
7200                 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7201                 break;
7202         case 14:
7203                 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7204                 break;
7205         }
7206         if (INTEL_GEN(dev_priv) >= 4)
7207                 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7208
7209         if (crtc_state->sdvo_tv_clock)
7210                 dpll |= PLL_REF_INPUT_TVCLKINBC;
7211         else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7212                  intel_panel_use_ssc(dev_priv))
7213                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7214         else
7215                 dpll |= PLL_REF_INPUT_DREFCLK;
7216
7217         dpll |= DPLL_VCO_ENABLE;
7218         crtc_state->dpll_hw_state.dpll = dpll;
7219
7220         if (INTEL_GEN(dev_priv) >= 4) {
7221                 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7222                         << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7223                 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7224         }
7225 }
7226
7227 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7228                               struct intel_crtc_state *crtc_state,
7229                               struct dpll *reduced_clock)
7230 {
7231         struct drm_device *dev = crtc->base.dev;
7232         struct drm_i915_private *dev_priv = to_i915(dev);
7233         u32 dpll;
7234         struct dpll *clock = &crtc_state->dpll;
7235
7236         i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7237
7238         dpll = DPLL_VGA_MODE_DIS;
7239
7240         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7241                 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7242         } else {
7243                 if (clock->p1 == 2)
7244                         dpll |= PLL_P1_DIVIDE_BY_TWO;
7245                 else
7246                         dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7247                 if (clock->p2 == 4)
7248                         dpll |= PLL_P2_DIVIDE_BY_4;
7249         }
7250
7251         if (!IS_I830(dev_priv) &&
7252             intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7253                 dpll |= DPLL_DVO_2X_MODE;
7254
7255         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7256             intel_panel_use_ssc(dev_priv))
7257                 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7258         else
7259                 dpll |= PLL_REF_INPUT_DREFCLK;
7260
7261         dpll |= DPLL_VCO_ENABLE;
7262         crtc_state->dpll_hw_state.dpll = dpll;
7263 }
7264
/*
 * Program the transcoder timing registers (H/V TOTAL, BLANK, SYNC and
 * VSYNCSHIFT) from the crtc's adjusted mode. Each register packs
 * "value - 1" pairs: low 16 bits for the first field, high 16 bits for
 * the second. Interlaced modes get a halfline correction applied to
 * vtotal/vblank_end before programming.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a fixed vsync shift; others derive it from the
		 * hsync position relative to the line midpoint. */
		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_GEN(dev_priv) > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}
7325
7326 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7327 {
7328         struct drm_device *dev = intel_crtc->base.dev;
7329         struct drm_i915_private *dev_priv = to_i915(dev);
7330         enum pipe pipe = intel_crtc->pipe;
7331
7332         /* pipesrc controls the size that is scaled from, which should
7333          * always be the user's requested size.
7334          */
7335         I915_WRITE(PIPESRC(pipe),
7336                    ((intel_crtc->config->pipe_src_w - 1) << 16) |
7337                    (intel_crtc->config->pipe_src_h - 1));
7338 }
7339
7340 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7341                                    struct intel_crtc_state *pipe_config)
7342 {
7343         struct drm_device *dev = crtc->base.dev;
7344         struct drm_i915_private *dev_priv = to_i915(dev);
7345         enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7346         uint32_t tmp;
7347
7348         tmp = I915_READ(HTOTAL(cpu_transcoder));
7349         pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7350         pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7351         tmp = I915_READ(HBLANK(cpu_transcoder));
7352         pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7353         pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7354         tmp = I915_READ(HSYNC(cpu_transcoder));
7355         pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7356         pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7357
7358         tmp = I915_READ(VTOTAL(cpu_transcoder));
7359         pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7360         pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7361         tmp = I915_READ(VBLANK(cpu_transcoder));
7362         pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7363         pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7364         tmp = I915_READ(VSYNC(cpu_transcoder));
7365         pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7366         pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7367
7368         if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7369                 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7370                 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7371                 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7372         }
7373 }
7374
7375 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7376                                     struct intel_crtc_state *pipe_config)
7377 {
7378         struct drm_device *dev = crtc->base.dev;
7379         struct drm_i915_private *dev_priv = to_i915(dev);
7380         u32 tmp;
7381
7382         tmp = I915_READ(PIPESRC(crtc->pipe));
7383         pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7384         pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7385
7386         pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7387         pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7388 }
7389
7390 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7391                                  struct intel_crtc_state *pipe_config)
7392 {
7393         mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7394         mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7395         mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7396         mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7397
7398         mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7399         mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7400         mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7401         mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7402
7403         mode->flags = pipe_config->base.adjusted_mode.flags;
7404         mode->type = DRM_MODE_TYPE_DRIVER;
7405
7406         mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7407
7408         mode->hsync = drm_mode_hsync(mode);
7409         mode->vrefresh = drm_mode_vrefresh(mode);
7410         drm_mode_set_name(mode);
7411 }
7412
/*
 * Compute and write PIPECONF for gmch platforms: double-wide mode,
 * bpc/dither selection (g4x and later only), interlace mode and, on
 * VLV/CHV, the limited color range selection.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* Gen3 and earlier, and SDVO outputs, use the field
		 * indication interlace variant; others the sync shift one. */
		if (INTEL_GEN(dev_priv) < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7467
7468 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7469                                    struct intel_crtc_state *crtc_state)
7470 {
7471         struct drm_device *dev = crtc->base.dev;
7472         struct drm_i915_private *dev_priv = to_i915(dev);
7473         const struct intel_limit *limit;
7474         int refclk = 48000;
7475
7476         memset(&crtc_state->dpll_hw_state, 0,
7477                sizeof(crtc_state->dpll_hw_state));
7478
7479         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7480                 if (intel_panel_use_ssc(dev_priv)) {
7481                         refclk = dev_priv->vbt.lvds_ssc_freq;
7482                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7483                 }
7484
7485                 limit = &intel_limits_i8xx_lvds;
7486         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7487                 limit = &intel_limits_i8xx_dvo;
7488         } else {
7489                 limit = &intel_limits_i8xx_dac;
7490         }
7491
7492         if (!crtc_state->clock_set &&
7493             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7494                                  refclk, NULL, &crtc_state->dpll)) {
7495                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7496                 return -EINVAL;
7497         }
7498
7499         i8xx_compute_dpll(crtc, crtc_state, NULL);
7500
7501         return 0;
7502 }
7503
7504 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7505                                   struct intel_crtc_state *crtc_state)
7506 {
7507         struct drm_device *dev = crtc->base.dev;
7508         struct drm_i915_private *dev_priv = to_i915(dev);
7509         const struct intel_limit *limit;
7510         int refclk = 96000;
7511
7512         memset(&crtc_state->dpll_hw_state, 0,
7513                sizeof(crtc_state->dpll_hw_state));
7514
7515         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7516                 if (intel_panel_use_ssc(dev_priv)) {
7517                         refclk = dev_priv->vbt.lvds_ssc_freq;
7518                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7519                 }
7520
7521                 if (intel_is_dual_link_lvds(dev))
7522                         limit = &intel_limits_g4x_dual_channel_lvds;
7523                 else
7524                         limit = &intel_limits_g4x_single_channel_lvds;
7525         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7526                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7527                 limit = &intel_limits_g4x_hdmi;
7528         } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7529                 limit = &intel_limits_g4x_sdvo;
7530         } else {
7531                 /* The option is for other outputs */
7532                 limit = &intel_limits_i9xx_sdvo;
7533         }
7534
7535         if (!crtc_state->clock_set &&
7536             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7537                                 refclk, NULL, &crtc_state->dpll)) {
7538                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7539                 return -EINVAL;
7540         }
7541
7542         i9xx_compute_dpll(crtc, crtc_state, NULL);
7543
7544         return 0;
7545 }
7546
7547 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7548                                   struct intel_crtc_state *crtc_state)
7549 {
7550         struct drm_device *dev = crtc->base.dev;
7551         struct drm_i915_private *dev_priv = to_i915(dev);
7552         const struct intel_limit *limit;
7553         int refclk = 96000;
7554
7555         memset(&crtc_state->dpll_hw_state, 0,
7556                sizeof(crtc_state->dpll_hw_state));
7557
7558         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7559                 if (intel_panel_use_ssc(dev_priv)) {
7560                         refclk = dev_priv->vbt.lvds_ssc_freq;
7561                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7562                 }
7563
7564                 limit = &intel_limits_pineview_lvds;
7565         } else {
7566                 limit = &intel_limits_pineview_sdvo;
7567         }
7568
7569         if (!crtc_state->clock_set &&
7570             !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7571                                 refclk, NULL, &crtc_state->dpll)) {
7572                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7573                 return -EINVAL;
7574         }
7575
7576         i9xx_compute_dpll(crtc, crtc_state, NULL);
7577
7578         return 0;
7579 }
7580
7581 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7582                                    struct intel_crtc_state *crtc_state)
7583 {
7584         struct drm_device *dev = crtc->base.dev;
7585         struct drm_i915_private *dev_priv = to_i915(dev);
7586         const struct intel_limit *limit;
7587         int refclk = 96000;
7588
7589         memset(&crtc_state->dpll_hw_state, 0,
7590                sizeof(crtc_state->dpll_hw_state));
7591
7592         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7593                 if (intel_panel_use_ssc(dev_priv)) {
7594                         refclk = dev_priv->vbt.lvds_ssc_freq;
7595                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7596                 }
7597
7598                 limit = &intel_limits_i9xx_lvds;
7599         } else {
7600                 limit = &intel_limits_i9xx_sdvo;
7601         }
7602
7603         if (!crtc_state->clock_set &&
7604             !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7605                                  refclk, NULL, &crtc_state->dpll)) {
7606                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7607                 return -EINVAL;
7608         }
7609
7610         i9xx_compute_dpll(crtc, crtc_state, NULL);
7611
7612         return 0;
7613 }
7614
7615 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7616                                   struct intel_crtc_state *crtc_state)
7617 {
7618         int refclk = 100000;
7619         const struct intel_limit *limit = &intel_limits_chv;
7620
7621         memset(&crtc_state->dpll_hw_state, 0,
7622                sizeof(crtc_state->dpll_hw_state));
7623
7624         if (!crtc_state->clock_set &&
7625             !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7626                                 refclk, NULL, &crtc_state->dpll)) {
7627                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7628                 return -EINVAL;
7629         }
7630
7631         chv_compute_dpll(crtc, crtc_state);
7632
7633         return 0;
7634 }
7635
7636 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7637                                   struct intel_crtc_state *crtc_state)
7638 {
7639         int refclk = 100000;
7640         const struct intel_limit *limit = &intel_limits_vlv;
7641
7642         memset(&crtc_state->dpll_hw_state, 0,
7643                sizeof(crtc_state->dpll_hw_state));
7644
7645         if (!crtc_state->clock_set &&
7646             !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7647                                 refclk, NULL, &crtc_state->dpll)) {
7648                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7649                 return -EINVAL;
7650         }
7651
7652         vlv_compute_dpll(crtc, crtc_state);
7653
7654         return 0;
7655 }
7656
/*
 * Read out the gmch panel fitter state into @pipe_config, but only if
 * the fitter exists on this platform and is attached to this pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        uint32_t tmp;

        /* Only mobile gen2/3 parts (and not 830) have a panel fitter. */
        if (INTEL_GEN(dev_priv) <= 3 &&
            (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
                return;

        tmp = I915_READ(PFIT_CONTROL);
        if (!(tmp & PFIT_ENABLE))
                return;

        /* Check whether the pfit is attached to our pipe. */
        if (INTEL_GEN(dev_priv) < 4) {
                /* Pre-gen4: the single fitter is hardwired to pipe B. */
                if (crtc->pipe != PIPE_B)
                        return;
        } else {
                /* Gen4+: the control register selects the pipe. */
                if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
                        return;
        }

        pipe_config->gmch_pfit.control = tmp;
        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}
7683
/*
 * Read back the VLV DPLL dividers over DPIO and derive the port clock
 * for hardware state readout.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        struct dpll clock;
        u32 mdiv;
        int refclk = 100000; /* 100 MHz reference, in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* DPIO accesses are serialized by the sideband lock. */
        mutex_lock(&dev_priv->sb_lock);
        mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
        mutex_unlock(&dev_priv->sb_lock);

        /* Unpack the divider fields packed into PLL DW3. */
        clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
        clock.m2 = mdiv & DPIO_M2DIV_MASK;
        clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
        clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
        clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

        pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
7710
/*
 * Read out the primary plane's current hardware state and build a
 * plane_config describing the framebuffer left enabled by the
 * firmware/BIOS (presumably for takeover of the boot framebuffer --
 * see callers for how plane_config is consumed).
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                              struct intel_initial_plane_config *plane_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_plane *plane = to_intel_plane(crtc->base.primary);
        enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
        enum pipe pipe;
        u32 val, base, offset;
        int fourcc, pixel_format;
        unsigned int aligned_height;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;

        /* Nothing to do if the plane is not enabled in hardware. */
        if (!plane->get_hw_state(plane, &pipe))
                return;

        WARN_ON(pipe != crtc->pipe);

        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
        if (!intel_fb) {
                DRM_DEBUG_KMS("failed to alloc fb\n");
                return;
        }

        fb = &intel_fb->base;

        fb->dev = dev;

        val = I915_READ(DSPCNTR(i9xx_plane));

        /* Tiling can only be read out of the control reg on gen4+. */
        if (INTEL_GEN(dev_priv) >= 4) {
                if (val & DISPPLANE_TILED) {
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }
        }

        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);

        /*
         * The surface base/offset register layout differs by platform:
         * HSW/BDW have a dedicated DSPOFFSET reg, gen4+ split the offset
         * into tiled/linear variants, and older parts only have a single
         * linear DSPADDR.
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                offset = I915_READ(DSPOFFSET(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                if (plane_config->tiling)
                        offset = I915_READ(DSPTILEOFF(i9xx_plane));
                else
                        offset = I915_READ(DSPLINOFF(i9xx_plane));
                base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
        } else {
                base = I915_READ(DSPADDR(i9xx_plane));
        }
        plane_config->base = base;

        /* PIPESRC stores size minus one in each dimension. */
        val = I915_READ(PIPESRC(pipe));
        fb->width = ((val >> 16) & 0xfff) + 1;
        fb->height = ((val >> 0) & 0xfff) + 1;

        val = I915_READ(DSPSTRIDE(i9xx_plane));
        fb->pitches[0] = val & 0xffffffc0;

        aligned_height = intel_fb_align_height(fb, 0, fb->height);

        /* Estimated allocation size of the firmware framebuffer. */
        plane_config->size = fb->pitches[0] * aligned_height;

        DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
                      crtc->base.name, plane->base.name, fb->width, fb->height,
                      fb->format->cpp[0] * 8, base, fb->pitches[0],
                      plane_config->size);

        plane_config->fb = intel_fb;
}
7786
/*
 * Read back the CHV DPLL dividers over DPIO and derive the port clock
 * for hardware state readout.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
                               struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        enum dpio_channel port = vlv_pipe_to_channel(pipe);
        struct dpll clock;
        u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
        int refclk = 100000; /* 100 MHz reference, in kHz */

        /* In case of DSI, DPLL will not be used */
        if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
                return;

        /* DPIO accesses are serialized by the sideband lock. */
        mutex_lock(&dev_priv->sb_lock);
        cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
        pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
        pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
        pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
        pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
        mutex_unlock(&dev_priv->sb_lock);

        /* m1 is either a fixed /2 divider or bypassed entirely. */
        clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
        /* m2: integer part from DW0, plus 22-bit fraction from DW2 if enabled. */
        clock.m2 = (pll_dw0 & 0xff) << 22;
        if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
                clock.m2 |= pll_dw2 & 0x3fffff;
        clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
        clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
        clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

        pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
7820
/*
 * Read the current hardware state of a gmch-platform pipe into
 * @pipe_config. Returns true if the pipe is powered and enabled and the
 * state was read out, false otherwise.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum intel_display_power_domain power_domain;
        uint32_t tmp;
        bool ret;

        /*
         * Only touch the registers if the power domain is already up;
         * readout must not wake the hardware.
         */
        power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
        if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
                return false;

        /* On these platforms the transcoder is tied 1:1 to the pipe. */
        pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
        pipe_config->shared_dpll = NULL;

        ret = false;

        tmp = I915_READ(PIPECONF(crtc->pipe));
        if (!(tmp & PIPECONF_ENABLE))
                goto out;

        /* Only G4X/VLV/CHV encode the pipe bpc in PIPECONF. */
        if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
            IS_CHERRYVIEW(dev_priv)) {
                switch (tmp & PIPECONF_BPC_MASK) {
                case PIPECONF_6BPC:
                        pipe_config->pipe_bpp = 18;
                        break;
                case PIPECONF_8BPC:
                        pipe_config->pipe_bpp = 24;
                        break;
                case PIPECONF_10BPC:
                        pipe_config->pipe_bpp = 30;
                        break;
                default:
                        break;
                }
        }

        if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
            (tmp & PIPECONF_COLOR_RANGE_SELECT))
                pipe_config->limited_color_range = true;

        /* Double-wide mode only exists on gen2/3. */
        if (INTEL_GEN(dev_priv) < 4)
                pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

        intel_get_pipe_timings(crtc, pipe_config);
        intel_get_pipe_src_size(crtc, pipe_config);

        i9xx_get_pfit_config(crtc, pipe_config);

        /* The pixel multiplier lives in different places per platform. */
        if (INTEL_GEN(dev_priv) >= 4) {
                /* No way to read it out on pipes B and C */
                if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
                        tmp = dev_priv->chv_dpll_md[crtc->pipe];
                else
                        tmp = I915_READ(DPLL_MD(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
                         >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
                pipe_config->dpll_hw_state.dpll_md = tmp;
        } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
                   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
                tmp = I915_READ(DPLL(crtc->pipe));
                pipe_config->pixel_multiplier =
                        ((tmp & SDVO_MULTIPLIER_MASK)
                         >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
        } else {
                /* Note that on i915G/GM the pixel multiplier is in the sdvo
                 * port and will be fixed up in the encoder->get_config
                 * function. */
                pipe_config->pixel_multiplier = 1;
        }
        pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
        if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
                /*
                 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
                 * on 830. Filter it out here so that we don't
                 * report errors due to that.
                 */
                if (IS_I830(dev_priv))
                        pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

                pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
                pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
        } else {
                /* Mask out read-only status bits. */
                pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
                                                     DPLL_PORTC_READY_MASK |
                                                     DPLL_PORTB_READY_MASK);
        }

        /* Derive the port clock from the PLL dividers just read out. */
        if (IS_CHERRYVIEW(dev_priv))
                chv_crtc_clock_get(crtc, pipe_config);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_crtc_clock_get(crtc, pipe_config);
        else
                i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * Normally the dotclock is filled in by the encoder .get_config()
         * but in case the pipe is enabled w/o any ports we need a sane
         * default.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                pipe_config->port_clock / pipe_config->pixel_multiplier;

        ret = true;

out:
        intel_display_power_put(dev_priv, power_domain);

        return ret;
}
7934
/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * ILK/SNB/IVB class hardware: pick the nonspread source, and enable or
 * disable SSC and the CPU eDP source output depending on which outputs
 * are present. The hardware must be transitioned one source at a time,
 * so the desired final value is computed first and then approached in
 * steps with settle delays.
 */
static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
        bool using_ssc_source = false;

        /* We need to take the global config into account */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                switch (encoder->type) {
                case INTEL_OUTPUT_LVDS:
                        has_panel = true;
                        has_lvds = true;
                        break;
                case INTEL_OUTPUT_EDP:
                        has_panel = true;
                        if (encoder->port == PORT_A)
                                has_cpu_edp = true;
                        break;
                default:
                        break;
                }
        }

        /*
         * On IBX the external CK505 clock chip presence comes from the
         * VBT, and SSC is only usable together with it; later PCHs have
         * no CK505 and can always use SSC.
         */
        if (HAS_PCH_IBX(dev_priv)) {
                has_ck505 = dev_priv->vbt.display_clock_mode;
                can_ssc = has_ck505;
        } else {
                has_ck505 = false;
                can_ssc = true;
        }

        /* Check if any DPLLs are using the SSC source */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                u32 temp = I915_READ(PCH_DPLL(i));

                if (!(temp & DPLL_VCO_ENABLE))
                        continue;

                if ((temp & PLL_REF_INPUT_MASK) ==
                    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
                        using_ssc_source = true;
                        break;
                }
        }

        DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
                      has_panel, has_lvds, has_ck505, using_ssc_source);

        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
         * PCH B stepping, previous chipset stepping should be
         * ignoring this setting.
         */
        val = I915_READ(PCH_DREF_CONTROL);

        /* As we must carefully and slowly disable/enable each source in turn,
         * compute the final state we want first and check if we need to
         * make any changes at all.
         */
        final = val;
        final &= ~DREF_NONSPREAD_SOURCE_MASK;
        if (has_ck505)
                final |= DREF_NONSPREAD_CK505_ENABLE;
        else
                final |= DREF_NONSPREAD_SOURCE_ENABLE;

        final &= ~DREF_SSC_SOURCE_MASK;
        final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
        final &= ~DREF_SSC1_ENABLE;

        if (has_panel) {
                final |= DREF_SSC_SOURCE_ENABLE;

                if (intel_panel_use_ssc(dev_priv) && can_ssc)
                        final |= DREF_SSC1_ENABLE;

                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc)
                                final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        else
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
        } else if (using_ssc_source) {
                /* Keep SSC running: an active DPLL still references it. */
                final |= DREF_SSC_SOURCE_ENABLE;
                final |= DREF_SSC1_ENABLE;
        }

        if (final == val)
                return;

        /* Always enable nonspread source */
        val &= ~DREF_NONSPREAD_SOURCE_MASK;

        if (has_ck505)
                val |= DREF_NONSPREAD_CK505_ENABLE;
        else
                val |= DREF_NONSPREAD_SOURCE_ENABLE;

        if (has_panel) {
                val &= ~DREF_SSC_SOURCE_MASK;
                val |= DREF_SSC_SOURCE_ENABLE;

                /* SSC must be turned on before enabling the CPU output  */
                if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                        DRM_DEBUG_KMS("Using SSC on panel\n");
                        val |= DREF_SSC1_ENABLE;
                } else
                        val &= ~DREF_SSC1_ENABLE;

                /* Get SSC going before enabling the outputs */
                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                /* settle delay between steps -- NOTE(review): presumably per BSpec */
                udelay(200);

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Enable CPU source on CPU attached eDP */
                if (has_cpu_edp) {
                        if (intel_panel_use_ssc(dev_priv) && can_ssc) {
                                DRM_DEBUG_KMS("Using SSC on eDP\n");
                                val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
                        } else
                                val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
                DRM_DEBUG_KMS("Disabling CPU source output\n");

                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

                /* Turn off CPU output */
                val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

                I915_WRITE(PCH_DREF_CONTROL, val);
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);

                if (!using_ssc_source) {
                        DRM_DEBUG_KMS("Disabling SSC source\n");

                        /* Turn off the SSC source */
                        val &= ~DREF_SSC_SOURCE_MASK;
                        val |= DREF_SSC_SOURCE_DISABLE;

                        /* Turn off SSC1 */
                        val &= ~DREF_SSC1_ENABLE;

                        I915_WRITE(PCH_DREF_CONTROL, val);
                        POSTING_READ(PCH_DREF_CONTROL);
                        udelay(200);
                }
        }

        /* The stepwise transition must end at the precomputed state. */
        BUG_ON(val != final);
}
8101
/*
 * Pulse the FDI mPHY reset via SOUTH_CHICKEN2: assert the reset control
 * bit and wait for the status bit to latch, then de-assert and wait for
 * it to clear (100 us timeout each way).
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        /* Assert reset */
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
                        FDI_MPHY_IOSFSB_RESET_STATUS, 100))
                DRM_ERROR("FDI mPHY reset assert timeout\n");

        /* De-assert reset */
        tmp = I915_READ(SOUTH_CHICKEN2);
        tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
        I915_WRITE(SOUTH_CHICKEN2, tmp);

        if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
                         FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
                DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
8122
/* WaMPhyProgramming:hsw */
/*
 * Program the FDI mPHY tuning registers through the SBI MPHY port.
 * The register offsets and values are opaque magic taken from the
 * workaround; registers come in 0x20xx/0x21xx pairs -- presumably one
 * per FDI channel, TODO confirm against BSpec. Do not modify without
 * consulting the workaround documentation.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
        uint32_t tmp;

        tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
        tmp &= ~(0xFF << 24);
        tmp |= (0x12 << 24);
        intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
        tmp |= (1 << 11);
        intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
        tmp |= (1 << 24) | (1 << 21) | (1 << 18);
        intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
        tmp &= ~(7 << 13);
        tmp |= (5 << 13);
        intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
        tmp &= ~0xFF;
        tmp |= 0x1C;
        intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
        tmp &= ~(0xFF << 16);
        tmp |= (0x1C << 16);
        intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
        tmp |= (1 << 27);
        intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

        tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
        tmp &= ~(0xF << 28);
        tmp |= (4 << 28);
        intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
8197
/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
                                 bool with_spread, bool with_fdi)
{
        uint32_t reg, tmp;

        /* Sanitize impossible parameter combinations. */
        if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
                with_spread = true;
        if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
            with_fdi, "LP PCH doesn't have FDI\n"))
                with_fdi = false;

        /* SBI accesses are serialized by the sideband lock. */
        mutex_lock(&dev_priv->sb_lock);

        /* Enable the SSC block, keeping its output path gated for now. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        tmp &= ~SBI_SSCCTL_DISABLE;
        tmp |= SBI_SSCCTL_PATHALT;
        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

        udelay(24);

        if (with_spread) {
                /* Ungate the path so the spread clock flows through. */
                tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
                tmp &= ~SBI_SSCCTL_PATHALT;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

                if (with_fdi) {
                        lpt_reset_fdi_mphy(dev_priv);
                        lpt_program_fdi_mphy(dev_priv);
                }
        }

        /*
         * Finally set the buffer-enable control bit (despite the name,
         * the disable path clears this same bit).
         */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
8242
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
        uint32_t reg, tmp;

        mutex_lock(&dev_priv->sb_lock);

        /* Clear the buffer-enable control bit first (mirror of enable). */
        reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
        tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
        tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
        intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

        /* Then gate the path and shut the SSC block down if still running. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
        if (!(tmp & SBI_SSCCTL_DISABLE)) {
                if (!(tmp & SBI_SSCCTL_PATHALT)) {
                        tmp |= SBI_SSCCTL_PATHALT;
                        intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
                        udelay(32);
                }
                tmp |= SBI_SSCCTL_DISABLE;
                intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
        }

        mutex_unlock(&dev_priv->sb_lock);
}
8268
/* Map a bend amount in steps (-50..50, multiples of 5) to a table index (0..20). */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/*
 * SBI_SSCDIVINTPHASE low-word values for each supported clock bend
 * amount, indexed with BEND_IDX(). Consumed by lpt_bend_clkout_dp().
 */
static const uint16_t sscdivintphase[] = {
        [BEND_IDX( 50)] = 0x3B23,
        [BEND_IDX( 45)] = 0x3B23,
        [BEND_IDX( 40)] = 0x3C23,
        [BEND_IDX( 35)] = 0x3C23,
        [BEND_IDX( 30)] = 0x3D23,
        [BEND_IDX( 25)] = 0x3D23,
        [BEND_IDX( 20)] = 0x3E23,
        [BEND_IDX( 15)] = 0x3E23,
        [BEND_IDX( 10)] = 0x3F23,
        [BEND_IDX(  5)] = 0x3F23,
        [BEND_IDX(  0)] = 0x0025,
        [BEND_IDX( -5)] = 0x0025,
        [BEND_IDX(-10)] = 0x0125,
        [BEND_IDX(-15)] = 0x0125,
        [BEND_IDX(-20)] = 0x0225,
        [BEND_IDX(-25)] = 0x0225,
        [BEND_IDX(-30)] = 0x0325,
        [BEND_IDX(-35)] = 0x0325,
        [BEND_IDX(-40)] = 0x0425,
        [BEND_IDX(-45)] = 0x0425,
        [BEND_IDX(-50)] = 0x0525,
};
8294
/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
        uint32_t tmp;
        int idx = BEND_IDX(steps);

        /* Reject values that are not multiples of 5 or outside the table. */
        if (WARN_ON(steps % 5 != 0))
                return;

        if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
                return;

        mutex_lock(&dev_priv->sb_lock);

        /*
         * Odd multiples of 5 use a dither pattern, even multiples none.
         * NOTE(review): magic value, presumably from the BSpec bend
         * programming sequence -- confirm before changing.
         */
        if (steps % 10 != 0)
                tmp = 0xAAAAAAAB;
        else
                tmp = 0x00000000;
        intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

        /* Program the divider/phase low word for this bend amount. */
        tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
        tmp &= 0xffff0000;
        tmp |= sscdivintphase[idx];
        intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

        mutex_unlock(&dev_priv->sb_lock);
}
8327
8328 #undef BEND_IDX
8329
8330 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
8331 {
8332         struct intel_encoder *encoder;
8333         bool has_vga = false;
8334
8335         for_each_intel_encoder(&dev_priv->drm, encoder) {
8336                 switch (encoder->type) {
8337                 case INTEL_OUTPUT_ANALOG:
8338                         has_vga = true;
8339                         break;
8340                 default:
8341                         break;
8342                 }
8343         }
8344
8345         if (has_vga) {
8346                 lpt_bend_clkout_dp(dev_priv, 0);
8347                 lpt_enable_clkout_dp(dev_priv, true, true);
8348         } else {
8349                 lpt_disable_clkout_dp(dev_priv);
8350         }
8351 }
8352
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	/* Dispatch on PCH generation; other PCH types need no setup here. */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		ironlake_init_pch_refclk(dev_priv);
		return;
	}

	if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}
8363
8364 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8365 {
8366         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8367         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8368         int pipe = intel_crtc->pipe;
8369         uint32_t val;
8370
8371         val = 0;
8372
8373         switch (intel_crtc->config->pipe_bpp) {
8374         case 18:
8375                 val |= PIPECONF_6BPC;
8376                 break;
8377         case 24:
8378                 val |= PIPECONF_8BPC;
8379                 break;
8380         case 30:
8381                 val |= PIPECONF_10BPC;
8382                 break;
8383         case 36:
8384                 val |= PIPECONF_12BPC;
8385                 break;
8386         default:
8387                 /* Case prevented by intel_choose_pipe_bpp_dither. */
8388                 BUG();
8389         }
8390
8391         if (intel_crtc->config->dither)
8392                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8393
8394         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8395                 val |= PIPECONF_INTERLACED_ILK;
8396         else
8397                 val |= PIPECONF_PROGRESSIVE;
8398
8399         if (intel_crtc->config->limited_color_range)
8400                 val |= PIPECONF_COLOR_RANGE_SELECT;
8401
8402         I915_WRITE(PIPECONF(pipe), val);
8403         POSTING_READ(PIPECONF(pipe));
8404 }
8405
8406 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8407 {
8408         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8409         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8410         enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8411         u32 val = 0;
8412
8413         if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8414                 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8415
8416         if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8417                 val |= PIPECONF_INTERLACED_ILK;
8418         else
8419                 val |= PIPECONF_PROGRESSIVE;
8420
8421         I915_WRITE(PIPECONF(cpu_transcoder), val);
8422         POSTING_READ(PIPECONF(cpu_transcoder));
8423 }
8424
8425 static void haswell_set_pipemisc(struct drm_crtc *crtc)
8426 {
8427         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8428         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8429         struct intel_crtc_state *config = intel_crtc->config;
8430
8431         if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8432                 u32 val = 0;
8433
8434                 switch (intel_crtc->config->pipe_bpp) {
8435                 case 18:
8436                         val |= PIPEMISC_DITHER_6_BPC;
8437                         break;
8438                 case 24:
8439                         val |= PIPEMISC_DITHER_8_BPC;
8440                         break;
8441                 case 30:
8442                         val |= PIPEMISC_DITHER_10_BPC;
8443                         break;
8444                 case 36:
8445                         val |= PIPEMISC_DITHER_12_BPC;
8446                         break;
8447                 default:
8448                         /* Case prevented by pipe_config_set_bpp. */
8449                         BUG();
8450                 }
8451
8452                 if (intel_crtc->config->dither)
8453                         val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8454
8455                 if (config->ycbcr420) {
8456                         val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
8457                                 PIPEMISC_YUV420_ENABLE |
8458                                 PIPEMISC_YUV420_MODE_FULL_BLEND;
8459                 }
8460
8461                 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8462         }
8463 }
8464
/*
 * Return the number of FDI lanes needed to carry @target_clock (kHz)
 * at @bpp bits per pixel over a link running at @link_bw (kHz).
 *
 * Account for spread spectrum to avoid
 * oversubscribing the link. Max center spread
 * is 2.5%; use 5% for safety's sake.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	unsigned int bps = target_clock * bpp * 21 / 20;
	unsigned int lane_bw = link_bw * 8;

	/* Round up: any remainder still needs a whole extra lane. */
	return (bps + lane_bw - 1) / lane_bw;
}
8475
8476 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8477 {
8478         return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8479 }
8480
/*
 * Compute the DPLL, FP0 and FP1 register values for a PCH PLL from the
 * divisors in @crtc_state->dpll and store them in
 * @crtc_state->dpll_hw_state.  @reduced_clock, if non-NULL, supplies
 * the divisors used for the downclocked mode (programmed into FP1);
 * otherwise FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same CB tuning criterion, applied to the reduced clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Pixel multiplier is stored minus one in the register field. */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Spread-spectrum reference only for LVDS panels that ask for it. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
8582
8583 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8584                                        struct intel_crtc_state *crtc_state)
8585 {
8586         struct drm_device *dev = crtc->base.dev;
8587         struct drm_i915_private *dev_priv = to_i915(dev);
8588         const struct intel_limit *limit;
8589         int refclk = 120000;
8590
8591         memset(&crtc_state->dpll_hw_state, 0,
8592                sizeof(crtc_state->dpll_hw_state));
8593
8594         /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8595         if (!crtc_state->has_pch_encoder)
8596                 return 0;
8597
8598         if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8599                 if (intel_panel_use_ssc(dev_priv)) {
8600                         DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8601                                       dev_priv->vbt.lvds_ssc_freq);
8602                         refclk = dev_priv->vbt.lvds_ssc_freq;
8603                 }
8604
8605                 if (intel_is_dual_link_lvds(dev)) {
8606                         if (refclk == 100000)
8607                                 limit = &intel_limits_ironlake_dual_lvds_100m;
8608                         else
8609                                 limit = &intel_limits_ironlake_dual_lvds;
8610                 } else {
8611                         if (refclk == 100000)
8612                                 limit = &intel_limits_ironlake_single_lvds_100m;
8613                         else
8614                                 limit = &intel_limits_ironlake_single_lvds;
8615                 }
8616         } else {
8617                 limit = &intel_limits_ironlake_dac;
8618         }
8619
8620         if (!crtc_state->clock_set &&
8621             !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8622                                 refclk, NULL, &crtc_state->dpll)) {
8623                 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8624                 return -EINVAL;
8625         }
8626
8627         ironlake_compute_dpll(crtc, crtc_state, NULL);
8628
8629         if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
8630                 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8631                                  pipe_name(crtc->pipe));
8632                 return -EINVAL;
8633         }
8634
8635         return 0;
8636 }
8637
8638 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8639                                          struct intel_link_m_n *m_n)
8640 {
8641         struct drm_device *dev = crtc->base.dev;
8642         struct drm_i915_private *dev_priv = to_i915(dev);
8643         enum pipe pipe = crtc->pipe;
8644
8645         m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8646         m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8647         m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8648                 & ~TU_SIZE_MASK;
8649         m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8650         m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8651                     & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8652 }
8653
8654 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
8655                                          enum transcoder transcoder,
8656                                          struct intel_link_m_n *m_n,
8657                                          struct intel_link_m_n *m2_n2)
8658 {
8659         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8660         enum pipe pipe = crtc->pipe;
8661
8662         if (INTEL_GEN(dev_priv) >= 5) {
8663                 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
8664                 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
8665                 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
8666                         & ~TU_SIZE_MASK;
8667                 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
8668                 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
8669                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8670                 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
8671                  * gen < 8) and if DRRS is supported (to make sure the
8672                  * registers are not unnecessarily read).
8673                  */
8674                 if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
8675                         crtc->config->has_drrs) {
8676                         m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
8677                         m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
8678                         m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
8679                                         & ~TU_SIZE_MASK;
8680                         m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
8681                         m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
8682                                         & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8683                 }
8684         } else {
8685                 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
8686                 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
8687                 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
8688                         & ~TU_SIZE_MASK;
8689                 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
8690                 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
8691                             & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8692         }
8693 }
8694
8695 void intel_dp_get_m_n(struct intel_crtc *crtc,
8696                       struct intel_crtc_state *pipe_config)
8697 {
8698         if (pipe_config->has_pch_encoder)
8699                 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8700         else
8701                 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
8702                                              &pipe_config->dp_m_n,
8703                                              &pipe_config->dp_m2_n2);
8704 }
8705
/* Read back the FDI link M/N configuration for the CPU transcoder. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
                                        struct intel_crtc_state *pipe_config)
{
        intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
                                     &pipe_config->fdi_m_n, NULL);
}
8712
8713 static void skylake_get_pfit_config(struct intel_crtc *crtc,
8714                                     struct intel_crtc_state *pipe_config)
8715 {
8716         struct drm_device *dev = crtc->base.dev;
8717         struct drm_i915_private *dev_priv = to_i915(dev);
8718         struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
8719         uint32_t ps_ctrl = 0;
8720         int id = -1;
8721         int i;
8722
8723         /* find scaler attached to this pipe */
8724         for (i = 0; i < crtc->num_scalers; i++) {
8725                 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
8726                 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
8727                         id = i;
8728                         pipe_config->pch_pfit.enabled = true;
8729                         pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
8730                         pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
8731                         break;
8732                 }
8733         }
8734
8735         scaler_state->scaler_id = id;
8736         if (id >= 0) {
8737                 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
8738         } else {
8739                 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
8740         }
8741 }
8742
/*
 * Reconstruct the firmware-programmed primary plane state (framebuffer
 * geometry, format, tiling and surface address) from the hardware so
 * the boot framebuffer can be inherited.  Allocates an
 * intel_framebuffer and stores it in @plane_config->fb on success;
 * bails out silently when the plane is disabled or allocation fails.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
	enum plane_id plane_id = plane->id;
	enum pipe pipe;
	u32 val, base, offset, stride_mult, tiling, alpha;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to inherit if the plane isn't enabled. */
	if (!plane->get_hw_state(plane, &pipe))
		return;

	WARN_ON(pipe != crtc->pipe);

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	fb->dev = dev;

	val = I915_READ(PLANE_CTL(pipe, plane_id));

	/* ICL uses a wider format field in PLANE_CTL. */
	if (INTEL_GEN(dev_priv) >= 11)
		pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
	else
		pixel_format = val & PLANE_CTL_FORMAT_MASK;

	/* GLK/CNL+ moved the alpha mode into PLANE_COLOR_CTL. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
		alpha &= PLANE_COLOR_ALPHA_MASK;
	} else {
		alpha = val & PLANE_CTL_ALPHA_MASK;
	}

	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX, alpha);
	fb->format = drm_format_info(fourcc);

	/* Map the hardware tiling field to a framebuffer modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier = DRM_FORMAT_MOD_LINEAR;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
			fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
		else
			fb->modifier = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
	plane_config->base = base;

	/* NOTE(review): offset is read but never used below — confirm intent. */
	offset = I915_READ(PLANE_OFFSET(pipe, plane_id));

	/* PLANE_SIZE stores width/height minus one. */
	val = I915_READ(PLANE_SIZE(pipe, plane_id));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, plane_id));
	stride_mult = intel_fb_stride_alignment(fb, 0);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(fb, 0, fb->height);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      crtc->base.name, plane->base.name, fb->width, fb->height,
		      fb->format->cpp[0] * 8, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(intel_fb);
}
8845
8846 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
8847                                      struct intel_crtc_state *pipe_config)
8848 {
8849         struct drm_device *dev = crtc->base.dev;
8850         struct drm_i915_private *dev_priv = to_i915(dev);
8851         uint32_t tmp;
8852
8853         tmp = I915_READ(PF_CTL(crtc->pipe));
8854
8855         if (tmp & PF_ENABLE) {
8856                 pipe_config->pch_pfit.enabled = true;
8857                 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
8858                 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
8859
8860                 /* We currently do not free assignements of panel fitters on
8861                  * ivb/hsw (since we don't use the higher upscaling modes which
8862                  * differentiates them) so just WARN about this case for now. */
8863                 if (IS_GEN7(dev_priv)) {
8864                         WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
8865                                 PF_PIPE_SEL_IVB(crtc->pipe));
8866                 }
8867         }
8868 }
8869
8870 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
8871                                      struct intel_crtc_state *pipe_config)
8872 {
8873         struct drm_device *dev = crtc->base.dev;
8874         struct drm_i915_private *dev_priv = to_i915(dev);
8875         enum intel_display_power_domain power_domain;
8876         uint32_t tmp;
8877         bool ret;
8878
8879         power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8880         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8881                 return false;
8882
8883         pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8884         pipe_config->shared_dpll = NULL;
8885
8886         ret = false;
8887         tmp = I915_READ(PIPECONF(crtc->pipe));
8888         if (!(tmp & PIPECONF_ENABLE))
8889                 goto out;
8890
8891         switch (tmp & PIPECONF_BPC_MASK) {
8892         case PIPECONF_6BPC:
8893                 pipe_config->pipe_bpp = 18;
8894                 break;
8895         case PIPECONF_8BPC:
8896                 pipe_config->pipe_bpp = 24;
8897                 break;
8898         case PIPECONF_10BPC:
8899                 pipe_config->pipe_bpp = 30;
8900                 break;
8901         case PIPECONF_12BPC:
8902                 pipe_config->pipe_bpp = 36;
8903                 break;
8904         default:
8905                 break;
8906         }
8907
8908         if (tmp & PIPECONF_COLOR_RANGE_SELECT)
8909                 pipe_config->limited_color_range = true;
8910
8911         if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
8912                 struct intel_shared_dpll *pll;
8913                 enum intel_dpll_id pll_id;
8914
8915                 pipe_config->has_pch_encoder = true;
8916
8917                 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
8918                 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
8919                                           FDI_DP_PORT_WIDTH_SHIFT) + 1;
8920
8921                 ironlake_get_fdi_m_n_config(crtc, pipe_config);
8922
8923                 if (HAS_PCH_IBX(dev_priv)) {
8924                         /*
8925                          * The pipe->pch transcoder and pch transcoder->pll
8926                          * mapping is fixed.
8927                          */
8928                         pll_id = (enum intel_dpll_id) crtc->pipe;
8929                 } else {
8930                         tmp = I915_READ(PCH_DPLL_SEL);
8931                         if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
8932                                 pll_id = DPLL_ID_PCH_PLL_B;
8933                         else
8934                                 pll_id= DPLL_ID_PCH_PLL_A;
8935                 }
8936
8937                 pipe_config->shared_dpll =
8938                         intel_get_shared_dpll_by_id(dev_priv, pll_id);
8939                 pll = pipe_config->shared_dpll;
8940
8941                 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
8942                                                 &pipe_config->dpll_hw_state));
8943
8944                 tmp = pipe_config->dpll_hw_state.dpll;
8945                 pipe_config->pixel_multiplier =
8946                         ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
8947                          >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
8948
8949                 ironlake_pch_clock_get(crtc, pipe_config);
8950         } else {
8951                 pipe_config->pixel_multiplier = 1;
8952         }
8953
8954         intel_get_pipe_timings(crtc, pipe_config);
8955         intel_get_pipe_src_size(crtc, pipe_config);
8956
8957         ironlake_get_pfit_config(crtc, pipe_config);
8958
8959         ret = true;
8960
8961 out:
8962         intel_display_power_put(dev_priv, power_domain);
8963
8964         return ret;
8965 }
8966
/*
 * Warn if anything still depends on the LCPLL before we disable it:
 * active pipes, the display power well, SPLL/WRPLLs, panel power,
 * backlight PWMs, the utility pin, PCH GTC, or (non-HPD) interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)),
			"Display power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* The second CPU backlight PWM only exists on Haswell. */
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9001
9002 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9003 {
9004         if (IS_HASWELL(dev_priv))
9005                 return I915_READ(D_COMP_HSW);
9006         else
9007                 return I915_READ(D_COMP_BDW);
9008 }
9009
9010 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9011 {
9012         if (IS_HASWELL(dev_priv)) {
9013                 mutex_lock(&dev_priv->pcu_lock);
9014                 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9015                                             val))
9016                         DRM_DEBUG_KMS("Failed to write to D_COMP\n");
9017                 mutex_unlock(&dev_priv->pcu_lock);
9018         } else {
9019                 I915_WRITE(D_COMP_BDW, val);
9020                 POSTING_READ(D_COMP_BDW);
9021         }
9022 }
9023
9024 /*
9025  * This function implements pieces of two sequences from BSpec:
9026  * - Sequence for display software to disable LCPLL
9027  * - Sequence for display software to allow package C8+
9028  * The steps implemented here are just the steps that actually touch the LCPLL
9029  * register. Callers should take care of disabling all the display engine
9030  * functions, doing the mode unset, fixing interrupts, etc.
9031  */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Move the CD clock off the LCPLL first so it survives the disable. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the compensation block once the PLL has unlocked. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	/* Finally let the hardware power the PLL down on its own. */
	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9075
9076 /*
9077  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9078  * source.
9079  */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully enabled. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable (and force) the compensation block before re-locking. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move the CD clock back from fclk to the (now locked) LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	/* CDCLK may have changed across the PC8 transition; re-read it. */
	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
}
9131
9132 /*
9133  * Package states C8 and deeper are really deep PC states that can only be
9134  * reached when all the devices on the system allow it, so even if the graphics
9135  * device allows PC8+, it doesn't mean the system will actually get to these
9136  * states. Our driver only allows PC8+ when going into runtime PM.
9137  *
9138  * The requirements for PC8+ are that all the outputs are disabled, the power
9139  * well is disabled and most interrupts are disabled, and these are also
9140  * requirements for runtime PM. When these conditions are met, we manually do
9141  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9143  * hang the machine.
9144  *
9145  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9146  * the state of some registers, so when we come back from PC8+ we need to
9147  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9148  * need to take care of the registers kept by RC6. Notice that this happens even
9149  * if we don't put the device in PCI D3 state (which is what currently happens
9150  * because of the runtime PM support).
9151  *
9152  * For more, read "Display Sequences for Package C8" on the hardware
9153  * documentation.
9154  */
9155 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9156 {
9157         uint32_t val;
9158
9159         DRM_DEBUG_KMS("Enabling package C8+\n");
9160
9161         if (HAS_PCH_LPT_LP(dev_priv)) {
9162                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9163                 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9164                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9165         }
9166
9167         lpt_disable_clkout_dp(dev_priv);
9168         hsw_disable_lcpll(dev_priv, true, true);
9169 }
9170
9171 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9172 {
9173         uint32_t val;
9174
9175         DRM_DEBUG_KMS("Disabling package C8+\n");
9176
9177         hsw_restore_lcpll(dev_priv);
9178         lpt_init_pch_refclk(dev_priv);
9179
9180         if (HAS_PCH_LPT_LP(dev_priv)) {
9181                 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9182                 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9183                 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9184         }
9185 }
9186
9187 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9188                                       struct intel_crtc_state *crtc_state)
9189 {
9190         struct intel_atomic_state *state =
9191                 to_intel_atomic_state(crtc_state->base.state);
9192
9193         if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
9194                 struct intel_encoder *encoder =
9195                         intel_get_crtc_new_encoder(state, crtc_state);
9196
9197                 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
9198                         DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
9199                                          pipe_name(crtc->pipe));
9200                         return -EINVAL;
9201                 }
9202         }
9203
9204         return 0;
9205 }
9206
/*
 * Read back which shared DPLL drives @port's DDI clock on CNL and record
 * it in @pipe_config->shared_dpll.
 */
static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
				   enum port port,
				   struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* Decode the per-port clock select field from DPCLKA_CFGCR0. */
	temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
	id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

	/* Only DPLL0..DPLL2 are valid selections in this field. */
	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9222
/*
 * Read back which shared DPLL drives @port's DDI clock on ICL and record
 * it in @pipe_config->shared_dpll. Ports A/B decode the selection from
 * DPCLKA_CFGCR0; ports C-F have fixed MG PLL assignments.
 */
static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* TODO: TBT pll not implemented. */
	switch (port) {
	case PORT_A:
	case PORT_B:
		/* Combo PHY ports: decode the per-port clock select field. */
		temp = I915_READ(DPCLKA_CFGCR0_ICL) &
		       DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
		id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);

		/* Only DPLL0/DPLL1 are valid selections for these ports. */
		if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
			return;
		break;
	case PORT_C:
		id = DPLL_ID_ICL_MGPLL1;
		break;
	case PORT_D:
		id = DPLL_ID_ICL_MGPLL2;
		break;
	case PORT_E:
		id = DPLL_ID_ICL_MGPLL3;
		break;
	case PORT_F:
		id = DPLL_ID_ICL_MGPLL4;
		break;
	default:
		MISSING_CASE(port);
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9260
9261 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9262                                 enum port port,
9263                                 struct intel_crtc_state *pipe_config)
9264 {
9265         enum intel_dpll_id id;
9266
9267         switch (port) {
9268         case PORT_A:
9269                 id = DPLL_ID_SKL_DPLL0;
9270                 break;
9271         case PORT_B:
9272                 id = DPLL_ID_SKL_DPLL1;
9273                 break;
9274         case PORT_C:
9275                 id = DPLL_ID_SKL_DPLL2;
9276                 break;
9277         default:
9278                 DRM_ERROR("Incorrect port type\n");
9279                 return;
9280         }
9281
9282         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9283 }
9284
/*
 * Read back which shared DPLL drives @port's DDI clock on SKL/KBL and
 * record it in @pipe_config->shared_dpll.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	/*
	 * Open-coded shift of the clock-select field; presumably matches
	 * DPLL_CTRL2_DDI_CLK_SEL_MASK's layout — confirm against i915_reg.h.
	 */
	id = temp >> (port * 3 + 1);

	/* Only DPLL0..DPLL3 are valid selections in this field. */
	if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
		return;

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9300
9301 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9302                                 enum port port,
9303                                 struct intel_crtc_state *pipe_config)
9304 {
9305         enum intel_dpll_id id;
9306         uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9307
9308         switch (ddi_pll_sel) {
9309         case PORT_CLK_SEL_WRPLL1:
9310                 id = DPLL_ID_WRPLL1;
9311                 break;
9312         case PORT_CLK_SEL_WRPLL2:
9313                 id = DPLL_ID_WRPLL2;
9314                 break;
9315         case PORT_CLK_SEL_SPLL:
9316                 id = DPLL_ID_SPLL;
9317                 break;
9318         case PORT_CLK_SEL_LCPLL_810:
9319                 id = DPLL_ID_LCPLL_810;
9320                 break;
9321         case PORT_CLK_SEL_LCPLL_1350:
9322                 id = DPLL_ID_LCPLL_1350;
9323                 break;
9324         case PORT_CLK_SEL_LCPLL_2700:
9325                 id = DPLL_ID_LCPLL_2700;
9326                 break;
9327         default:
9328                 MISSING_CASE(ddi_pll_sel);
9329                 /* fall through */
9330         case PORT_CLK_SEL_NONE:
9331                 return;
9332         }
9333
9334         pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9335 }
9336
/*
 * Determine which CPU transcoder is attached to @crtc and whether the
 * pipe is enabled. On success, a reference to the transcoder's power
 * domain is kept and recorded in @power_domain_mask; the caller is
 * responsible for dropping every domain in the mask.
 *
 * Returns true if the pipe is enabled (PIPECONF_ENABLE set).
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		/* The eDP transcoder overrides the fixed mapping for its pipe. */
		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	/* Reference kept on success; caller releases via the mask. */
	*power_domain_mask |= BIT_ULL(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
9388
/*
 * Check whether one of the BXT DSI transcoders (ports A/C) is driving
 * @crtc's pipe, recording it in @pipe_config->cpu_transcoder if so.
 * Power-domain references acquired along the way are recorded in
 * @power_domain_mask for the caller to release.
 *
 * Returns true if a DSI transcoder is attached to this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 u64 *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		/* Reference kept; caller releases every domain in the mask. */
		*power_domain_mask |= BIT_ULL(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!bxt_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Skip ports whose pipe select doesn't match this crtc. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		break;
	}

	return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
9436
/*
 * Read out the DDI port state for @crtc: which port the transcoder is
 * attached to, which shared DPLL drives it (per-platform decode), and
 * whether the PCH/FDI path is in use (HSW/BDW port E only).
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_ICELAKE(dev_priv))
		icelake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_CANNONLAKE(dev_priv))
		cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_BC(dev_priv))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_GEN9_LP(dev_priv))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* Cross-check the readout against the PLL's actual hardware state. */
	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
						&pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is connected to DDI E.
	 * So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_GEN(dev_priv) < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9482
/*
 * Read the current hardware state of @crtc into @pipe_config (HSW+).
 *
 * Power-domain references are taken only if the relevant wells are
 * already enabled; every acquired domain is tracked in power_domain_mask
 * and released at the end, so this never leaves refs behind.
 *
 * Returns true if the pipe is active; if not, @pipe_config contents
 * beyond the transcoder fields are not filled in.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain power_domain;
	u64 power_domain_mask;
	bool active;

	intel_crtc_init_scalers(crtc, pipe_config);

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT_ULL(power_domain);

	pipe_config->shared_dpll = NULL;

	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);

	/* A pipe can be driven by a DSI transcoder instead (BXT/GLK). */
	if (IS_GEN9_LP(dev_priv) &&
	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
		WARN_ON(active);
		active = true;
	}

	if (!active)
		goto out;

	/* DSI transcoders have no DDI port and own their timings. */
	if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		haswell_get_ddi_port_state(crtc, pipe_config);
		intel_get_pipe_timings(crtc, pipe_config);
	}

	intel_get_pipe_src_size(crtc, pipe_config);

	pipe_config->gamma_mode =
		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;

	/* Read back YCbCr output state and sanity-check its consistency. */
	if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
		u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
		bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;

		if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
			bool blend_mode_420 = tmp &
					      PIPEMISC_YUV420_MODE_FULL_BLEND;

			pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
			if (pipe_config->ycbcr420 != clrspace_yuv ||
			    pipe_config->ycbcr420 != blend_mode_420)
				DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
		} else if (clrspace_yuv) {
			DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
		}
	}

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT_ULL(power_domain);
		if (INTEL_GEN(dev_priv) >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (hsw_crtc_supports_ips(crtc)) {
		if (IS_HASWELL(dev_priv))
			pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
		else {
			/*
			 * We cannot readout IPS state on broadwell, set to
			 * true so we can set it to a defined state on first
			 * commit.
			 */
			pipe_config->ips_enabled = true;
		}
	}

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

out:
	/* Drop every power-domain reference we acquired above. */
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return active;
}
9574
9575 static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
9576 {
9577         struct drm_i915_private *dev_priv =
9578                 to_i915(plane_state->base.plane->dev);
9579         const struct drm_framebuffer *fb = plane_state->base.fb;
9580         const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
9581         u32 base;
9582
9583         if (INTEL_INFO(dev_priv)->cursor_needs_physical)
9584                 base = obj->phys_handle->busaddr;
9585         else
9586                 base = intel_plane_ggtt_offset(plane_state);
9587
9588         base += plane_state->main.offset;
9589
9590         /* ILK+ do this automagically */
9591         if (HAS_GMCH_DISPLAY(dev_priv) &&
9592             plane_state->base.rotation & DRM_MODE_ROTATE_180)
9593                 base += (plane_state->base.crtc_h *
9594                          plane_state->base.crtc_w - 1) * fb->format->cpp[0];
9595
9596         return base;
9597 }
9598
9599 static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
9600 {
9601         int x = plane_state->base.crtc_x;
9602         int y = plane_state->base.crtc_y;
9603         u32 pos = 0;
9604
9605         if (x < 0) {
9606                 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
9607                 x = -x;
9608         }
9609         pos |= x << CURSOR_X_SHIFT;
9610
9611         if (y < 0) {
9612                 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
9613                 y = -y;
9614         }
9615         pos |= y << CURSOR_Y_SHIFT;
9616
9617         return pos;
9618 }
9619
9620 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
9621 {
9622         const struct drm_mode_config *config =
9623                 &plane_state->base.plane->dev->mode_config;
9624         int width = plane_state->base.crtc_w;
9625         int height = plane_state->base.crtc_h;
9626
9627         return width > 0 && width <= config->cursor_width &&
9628                 height > 0 && height <= config->cursor_height;
9629 }
9630
/*
 * Common cursor plane checks: clip the plane state against the crtc,
 * reject tiled framebuffers and arbitrary panning, and compute the
 * surface offset for later CURBASE programming.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_check_cursor(struct intel_crtc_state *crtc_state,
			      struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int src_x, src_y;
	u32 offset;
	int ret;

	/* Cursors can't be scaled; clip with 1:1 scaling only. */
	ret = drm_atomic_helper_check_plane_state(&plane_state->base,
						  &crtc_state->base,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	if (ret)
		return ret;

	/* No fb means the cursor is being disabled; nothing more to check. */
	if (!fb)
		return 0;

	if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	src_x = plane_state->base.src_x >> 16;
	src_y = plane_state->base.src_y >> 16;

	intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
	offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0);

	/* Any residual x/y after the offset computation means panning. */
	if (src_x != 0 || src_y != 0) {
		DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
		return -EINVAL;
	}

	plane_state->main.offset = offset;

	return 0;
}
9670
9671 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
9672                            const struct intel_plane_state *plane_state)
9673 {
9674         const struct drm_framebuffer *fb = plane_state->base.fb;
9675
9676         return CURSOR_ENABLE |
9677                 CURSOR_GAMMA_ENABLE |
9678                 CURSOR_FORMAT_ARGB |
9679                 CURSOR_STRIDE(fb->pitches[0]);
9680 }
9681
9682 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
9683 {
9684         int width = plane_state->base.crtc_w;
9685
9686         /*
9687          * 845g/865g are only limited by the width of their cursors,
9688          * the height is arbitrary up to the precision of the register.
9689          */
9690         return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
9691 }
9692
/*
 * 845g/865g-specific cursor checks on top of intel_check_cursor():
 * validate the cursor size and the (power-of-two) stride, then cache
 * the CURCNTR value for the commit phase.
 *
 * Returns 0 on success or a negative error code.
 */
static int i845_check_cursor(struct intel_plane *plane,
			     struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	const struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i845_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* Only these power-of-two strides can be programmed into CURCNTR. */
	switch (fb->pitches[0]) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		break;
	default:
		DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
			      fb->pitches[0]);
		return -EINVAL;
	}

	plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9732
/*
 * Program the 845g/865g cursor registers. With a NULL @plane_state the
 * cursor is disabled (all-zero register values). A full reprogram
 * requires disabling the cursor first; position-only changes can be
 * written directly.
 */
static void i845_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 cntl = 0, base = 0, pos = 0, size = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;

		cntl = plane_state->ctl;
		size = (height << 12) | width;

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* On these chipsets we can only modify the base/size/stride
	 * whilst the cursor is disabled.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != size ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(PIPE_A), 0);
		I915_WRITE_FW(CURBASE(PIPE_A), base);
		I915_WRITE_FW(CURSIZE, size);
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
		I915_WRITE_FW(CURCNTR(PIPE_A), cntl);

		/* Cache what we wrote so the fast path below can be used. */
		plane->cursor.base = base;
		plane->cursor.size = size;
		plane->cursor.cntl = cntl;
	} else {
		/* Only the position changed; no disable/re-enable needed. */
		I915_WRITE_FW(CURPOS(PIPE_A), pos);
	}

	POSTING_READ_FW(CURCNTR(PIPE_A));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
9777
/* Disable the 845g/865g cursor by programming an all-zero state. */
static void i845_disable_cursor(struct intel_plane *plane,
				struct intel_crtc *crtc)
{
	i845_update_cursor(plane, NULL, NULL);
}
9783
9784 static bool i845_cursor_get_hw_state(struct intel_plane *plane,
9785                                      enum pipe *pipe)
9786 {
9787         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
9788         enum intel_display_power_domain power_domain;
9789         bool ret;
9790
9791         power_domain = POWER_DOMAIN_PIPE(PIPE_A);
9792         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9793                 return false;
9794
9795         ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
9796
9797         *pipe = PIPE_A;
9798
9799         intel_display_power_put(dev_priv, power_domain);
9800
9801         return ret;
9802 }
9803
9804 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
9805                            const struct intel_plane_state *plane_state)
9806 {
9807         struct drm_i915_private *dev_priv =
9808                 to_i915(plane_state->base.plane->dev);
9809         struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9810         u32 cntl = 0;
9811
9812         if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
9813                 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
9814
9815         if (INTEL_GEN(dev_priv) <= 10) {
9816                 cntl |= MCURSOR_GAMMA_ENABLE;
9817
9818                 if (HAS_DDI(dev_priv))
9819                         cntl |= MCURSOR_PIPE_CSC_ENABLE;
9820         }
9821
9822         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
9823                 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
9824
9825         switch (plane_state->base.crtc_w) {
9826         case 64:
9827                 cntl |= MCURSOR_MODE_64_ARGB_AX;
9828                 break;
9829         case 128:
9830                 cntl |= MCURSOR_MODE_128_ARGB_AX;
9831                 break;
9832         case 256:
9833                 cntl |= MCURSOR_MODE_256_ARGB_AX;
9834                 break;
9835         default:
9836                 MISSING_CASE(plane_state->base.crtc_w);
9837                 return 0;
9838         }
9839
9840         if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
9841                 cntl |= MCURSOR_ROTATE_180;
9842
9843         return cntl;
9844 }
9845
9846 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
9847 {
9848         struct drm_i915_private *dev_priv =
9849                 to_i915(plane_state->base.plane->dev);
9850         int width = plane_state->base.crtc_w;
9851         int height = plane_state->base.crtc_h;
9852
9853         if (!intel_cursor_size_ok(plane_state))
9854                 return false;
9855
9856         /* Cursor width is limited to a few power-of-two sizes */
9857         switch (width) {
9858         case 256:
9859         case 128:
9860         case 64:
9861                 break;
9862         default:
9863                 return false;
9864         }
9865
9866         /*
9867          * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
9868          * height from 8 lines up to the cursor width, when the
9869          * cursor is not rotated. Everything else requires square
9870          * cursors.
9871          */
9872         if (HAS_CUR_FBC(dev_priv) &&
9873             plane_state->base.rotation & DRM_MODE_ROTATE_0) {
9874                 if (height < 8 || height > width)
9875                         return false;
9876         } else {
9877                 if (height != width)
9878                         return false;
9879         }
9880
9881         return true;
9882 }
9883
/*
 * i9xx+-specific cursor checks on top of intel_check_cursor(): validate
 * the cursor size and stride, work around a CHV pipe C hardware issue,
 * and cache the CURCNTR value for the commit phase.
 *
 * Returns 0 on success or a negative error code.
 */
static int i9xx_check_cursor(struct intel_plane *plane,
			     struct intel_crtc_state *crtc_state,
			     struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	const struct drm_framebuffer *fb = plane_state->base.fb;
	enum pipe pipe = plane->pipe;
	int ret;

	ret = intel_check_cursor(crtc_state, plane_state);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!fb)
		return 0;

	/* Check for which cursor types we support */
	if (!i9xx_cursor_size_ok(plane_state)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  plane_state->base.crtc_w,
			  plane_state->base.crtc_h);
		return -EINVAL;
	}

	/* The stride must exactly match the visible cursor width. */
	if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
		DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
			      fb->pitches[0], plane_state->base.crtc_w);
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
	    plane_state->base.visible && plane_state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);

	return 0;
}
9935
/*
 * Program the i9xx+ cursor registers for @plane. With a NULL
 * @plane_state the cursor is disabled (all-zero register values). The
 * write order below is chosen so that the hardware latches a consistent
 * update; see the in-body comment for the arming rules.
 */
static void i9xx_update_cursor(struct intel_plane *plane,
			       const struct intel_crtc_state *crtc_state,
			       const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;
	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
	unsigned long irqflags;

	if (plane_state && plane_state->base.visible) {
		cntl = plane_state->ctl;

		/* Non-square cursors need CUR_FBC_CTL to set the height. */
		if (plane_state->base.crtc_h != plane_state->base.crtc_w)
			fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);

		base = intel_cursor_base(plane_state);
		pos = intel_cursor_position(plane_state);
	}

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * On some platforms writing CURCNTR first will also
	 * cause CURPOS to be armed by the CURBASE write.
	 * Without the CURCNTR write the CURPOS write would
	 * arm itself. Thus we always start the full update
	 * with a CURCNTR write.
	 *
	 * On other platforms CURPOS always requires the
	 * CURBASE write to arm the update. Additonally
	 * a write to any of the cursor register will cancel
	 * an already armed cursor update. Thus leaving out
	 * the CURBASE write after CURPOS could lead to a
	 * cursor that doesn't appear to move, or even change
	 * shape. Thus we always write CURBASE.
	 *
	 * CURCNTR and CUR_FBC_CTL are always
	 * armed by the CURBASE write only.
	 */
	if (plane->cursor.base != base ||
	    plane->cursor.size != fbc_ctl ||
	    plane->cursor.cntl != cntl) {
		I915_WRITE_FW(CURCNTR(pipe), cntl);
		if (HAS_CUR_FBC(dev_priv))
			I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);

		/* Cache what we wrote so the fast path below can be used. */
		plane->cursor.base = base;
		plane->cursor.size = fbc_ctl;
		plane->cursor.cntl = cntl;
	} else {
		I915_WRITE_FW(CURPOS(pipe), pos);
		I915_WRITE_FW(CURBASE(pipe), base);
	}

	POSTING_READ_FW(CURBASE(pipe));

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
9996
/*
 * Disable the cursor plane by running an update with NULL crtc/plane
 * states.  @crtc is unused here; presumably the signature matches the
 * common plane-disable hook — TODO confirm against the vfunc table.
 */
static void i9xx_disable_cursor(struct intel_plane *plane,
                                struct intel_crtc *crtc)
{
        i9xx_update_cursor(plane, NULL, NULL);
}
10002
10003 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10004                                      enum pipe *pipe)
10005 {
10006         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10007         enum intel_display_power_domain power_domain;
10008         bool ret;
10009         u32 val;
10010
10011         /*
10012          * Not 100% correct for planes that can move between pipes,
10013          * but that's only the case for gen2-3 which don't have any
10014          * display power wells.
10015          */
10016         power_domain = POWER_DOMAIN_PIPE(plane->pipe);
10017         if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10018                 return false;
10019
10020         val = I915_READ(CURCNTR(plane->pipe));
10021
10022         ret = val & MCURSOR_MODE;
10023
10024         if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10025                 *pipe = plane->pipe;
10026         else
10027                 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10028                         MCURSOR_PIPE_SELECT_SHIFT;
10029
10030         intel_display_power_put(dev_priv, power_domain);
10031
10032         return ret;
10033 }
10034
/*
 * VESA 640x480x72Hz mode to set on the pipe.  Used by
 * intel_get_load_detect_pipe() when the caller supplies no explicit mode.
 */
static const struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
                 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10040
10041 struct drm_framebuffer *
10042 intel_framebuffer_create(struct drm_i915_gem_object *obj,
10043                          struct drm_mode_fb_cmd2 *mode_cmd)
10044 {
10045         struct intel_framebuffer *intel_fb;
10046         int ret;
10047
10048         intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10049         if (!intel_fb)
10050                 return ERR_PTR(-ENOMEM);
10051
10052         ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
10053         if (ret)
10054                 goto err;
10055
10056         return &intel_fb->base;
10057
10058 err:
10059         kfree(intel_fb);
10060         return ERR_PTR(ret);
10061 }
10062
10063 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10064                                         struct drm_crtc *crtc)
10065 {
10066         struct drm_plane *plane;
10067         struct drm_plane_state *plane_state;
10068         int ret, i;
10069
10070         ret = drm_atomic_add_affected_planes(state, crtc);
10071         if (ret)
10072                 return ret;
10073
10074         for_each_new_plane_in_state(state, plane, plane_state, i) {
10075                 if (plane_state->crtc != crtc)
10076                         continue;
10077
10078                 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10079                 if (ret)
10080                         return ret;
10081
10082                 drm_atomic_set_fb_for_plane(plane_state, NULL);
10083         }
10084
10085         return 0;
10086 }
10087
/*
 * intel_get_load_detect_pipe - steal a pipe to run load-based detection
 * @connector: connector to light up for detection
 * @mode: mode to program, or NULL for the default VESA 640x480@72 mode
 * @old: cookie receiving the state needed to undo this temporary modeset
 * @ctx: modeset lock acquire context held by the caller
 *
 * Commits a minimal modeset that drives @connector so analog-style load
 * detection can sample the hardware.  NOTE(review): the declared return
 * type is int but the body mixes true/false with the -EDEADLK error code;
 * callers must treat -EDEADLK as "back off and retry locking" and any
 * other non-zero value as success.
 */
int intel_get_load_detect_pipe(struct drm_connector *connector,
                               const struct drm_display_mode *mode,
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
{
        struct intel_crtc *intel_crtc;
        struct intel_encoder *intel_encoder =
                intel_attached_encoder(connector);
        struct drm_crtc *possible_crtc;
        struct drm_encoder *encoder = &intel_encoder->base;
        struct drm_crtc *crtc = NULL;
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
        int ret, i = -1; /* i indexes encoder->possible_crtcs bits below */

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                      connector->base.id, connector->name,
                      encoder->base.id, encoder->name);

        old->restore_state = NULL;

        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

        /*
         * Algorithm gets a little messy:
         *
         *   - if the connector already has an assigned crtc, use it (but make
         *     sure it's on first)
         *
         *   - try to find the first unused crtc that can drive this connector,
         *     and use that if we find one
         */

        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
                crtc = connector->state->crtc;

                ret = drm_modeset_lock(&crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* Make sure the crtc and connector are running */
                goto found;
        }

        /* Find an unused one (if possible) */
        for_each_crtc(dev, possible_crtc) {
                i++;
                if (!(encoder->possible_crtcs & (1 << i)))
                        continue;

                ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
                if (ret)
                        goto fail;

                /* An already-enabled crtc is in use; drop its lock and move on. */
                if (possible_crtc->state->enable) {
                        drm_modeset_unlock(&possible_crtc->mutex);
                        continue;
                }

                crtc = possible_crtc;
                break;
        }

        /*
         * If we didn't find an unused CRTC, don't use any.
         */
        if (!crtc) {
                DRM_DEBUG_KMS("no pipe available for load-detect\n");
                ret = -ENODEV;
                goto fail;
        }

found:
        intel_crtc = to_intel_crtc(crtc);

        /* Two states: one to program detection, one to restore afterwards. */
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                ret = -ENOMEM;
                goto fail;
        }

        state->acquire_ctx = ctx;
        restore_state->acquire_ctx = ctx;

        /* Build the load-detect state: bind connector to crtc, enable it. */
        connector_state = drm_atomic_get_connector_state(state, connector);
        if (IS_ERR(connector_state)) {
                ret = PTR_ERR(connector_state);
                goto fail;
        }

        ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
        if (ret)
                goto fail;

        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        }

        crtc_state->base.active = crtc_state->base.enable = true;

        if (!mode)
                mode = &load_detect_mode;

        ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
        if (ret)
                goto fail;

        /* Planes would scan out stale content; turn them all off. */
        ret = intel_modeset_disable_planes(state, crtc);
        if (ret)
                goto fail;

        /* Snapshot the current connector/crtc/plane state for later restore. */
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
                ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
        if (!ret)
                ret = drm_atomic_add_affected_planes(restore_state, crtc);
        if (ret) {
                DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
                goto fail;
        }

        ret = drm_atomic_commit(state);
        if (ret) {
                DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
                goto fail;
        }

        /* Ownership of restore_state passes to the caller's cookie. */
        old->restore_state = restore_state;
        drm_atomic_state_put(state);

        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
        return true;

fail:
        if (state) {
                drm_atomic_state_put(state);
                state = NULL;
        }
        if (restore_state) {
                drm_atomic_state_put(restore_state);
                restore_state = NULL;
        }

        /* -EDEADLK is propagated so the caller can back off and retry. */
        if (ret == -EDEADLK)
                return ret;

        return false;
}
10245
10246 void intel_release_load_detect_pipe(struct drm_connector *connector,
10247                                     struct intel_load_detect_pipe *old,
10248                                     struct drm_modeset_acquire_ctx *ctx)
10249 {
10250         struct intel_encoder *intel_encoder =
10251                 intel_attached_encoder(connector);
10252         struct drm_encoder *encoder = &intel_encoder->base;
10253         struct drm_atomic_state *state = old->restore_state;
10254         int ret;
10255
10256         DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10257                       connector->base.id, connector->name,
10258                       encoder->base.id, encoder->name);
10259
10260         if (!state)
10261                 return;
10262
10263         ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
10264         if (ret)
10265                 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10266         drm_atomic_state_put(state);
10267 }
10268
10269 static int i9xx_pll_refclk(struct drm_device *dev,
10270                            const struct intel_crtc_state *pipe_config)
10271 {
10272         struct drm_i915_private *dev_priv = to_i915(dev);
10273         u32 dpll = pipe_config->dpll_hw_state.dpll;
10274
10275         if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10276                 return dev_priv->vbt.lvds_ssc_freq;
10277         else if (HAS_PCH_SPLIT(dev_priv))
10278                 return 120000;
10279         else if (!IS_GEN2(dev_priv))
10280                 return 96000;
10281         else
10282                 return 48000;
10283 }
10284
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config)
{
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        int pipe = pipe_config->cpu_transcoder;
        u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        struct dpll clock;
        int port_clock;
        int refclk = i9xx_pll_refclk(dev, pipe_config);

        /* Pick the FP divider register selected by the DPLL (FP0 vs FP1). */
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
                fp = pipe_config->dpll_hw_state.fp0;
        else
                fp = pipe_config->dpll_hw_state.fp1;

        /* Decode the m1/n/m2 dividers; Pineview uses different field layouts. */
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev_priv)) {
                clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
                clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
        } else {
                clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
                clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
        }

        if (!IS_GEN2(dev_priv)) {
                /* p1 is stored one-hot encoded; ffs() recovers the divider value. */
                if (IS_PINEVIEW(dev_priv))
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
                                DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
                else
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
                               DPLL_FPA01_P1_POST_DIV_SHIFT);

                /* p2 depends on the DPLL operating mode (DAC vs LVDS). */
                switch (dpll & DPLL_MODE_MASK) {
                case DPLLB_MODE_DAC_SERIAL:
                        clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
                                5 : 10;
                        break;
                case DPLLB_MODE_LVDS:
                        clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
                                7 : 14;
                        break;
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
                        return;
                }

                if (IS_PINEVIEW(dev_priv))
                        port_clock = pnv_calc_dpll_params(refclk, &clock);
                else
                        port_clock = i9xx_calc_dpll_params(refclk, &clock);
        } else {
                /* gen2: i830 has no LVDS register, so read 0 there. */
                u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
                bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

                if (is_lvds) {
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);

                        if (lvds & LVDS_CLKB_POWER_UP)
                                clock.p2 = 7;
                        else
                                clock.p2 = 14;
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
                        else {
                                clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
                                            DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
                        }
                        if (dpll & PLL_P2_DIVIDE_BY_4)
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
                }

                port_clock = i9xx_calc_dpll_params(refclk, &clock);
        }

        /*
         * This value includes pixel_multiplier. We will use
         * port_clock to compute adjusted_mode.crtc_clock in the
         * encoder's get_config() function.
         */
        pipe_config->port_clock = port_clock;
}
10374
10375 int intel_dotclock_calculate(int link_freq,
10376                              const struct intel_link_m_n *m_n)
10377 {
10378         /*
10379          * The calculation for the data clock is:
10380          * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10381          * But we want to avoid losing precison if possible, so:
10382          * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10383          *
10384          * and the link clock is simpler:
10385          * link_clock = (m * link_clock) / n
10386          */
10387
10388         if (!m_n->link_n)
10389                 return 0;
10390
10391         return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
10392 }
10393
/*
 * Read out the DPLL-derived port clock and compute a dotclock for a
 * PCH (FDI-fed) pipe from the FDI M/N configuration.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
                                   struct intel_crtc_state *pipe_config)
{
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

        /* read out port_clock from the DPLL */
        i9xx_crtc_clock_get(crtc, pipe_config);

        /*
         * In case there is an active pipe without active ports,
         * we may need some idea for the dotclock anyway.
         * Calculate one based on the FDI configuration.
         */
        pipe_config->base.adjusted_mode.crtc_clock =
                intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
                                         &pipe_config->fdi_m_n);
}
10411
10412 /* Returns the currently programmed mode of the given encoder. */
10413 struct drm_display_mode *
10414 intel_encoder_current_mode(struct intel_encoder *encoder)
10415 {
10416         struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
10417         struct intel_crtc_state *crtc_state;
10418         struct drm_display_mode *mode;
10419         struct intel_crtc *crtc;
10420         enum pipe pipe;
10421
10422         if (!encoder->get_hw_state(encoder, &pipe))
10423                 return NULL;
10424
10425         crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
10426
10427         mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10428         if (!mode)
10429                 return NULL;
10430
10431         crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
10432         if (!crtc_state) {
10433                 kfree(mode);
10434                 return NULL;
10435         }
10436
10437         crtc_state->base.crtc = &crtc->base;
10438
10439         if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
10440                 kfree(crtc_state);
10441                 kfree(mode);
10442                 return NULL;
10443         }
10444
10445         encoder->get_config(encoder, crtc_state);
10446
10447         intel_mode_from_pipe_config(mode, crtc_state);
10448
10449         kfree(crtc_state);
10450
10451         return mode;
10452 }
10453
/*
 * Clean up the drm core state of @crtc and free the wrapping intel_crtc.
 * NOTE(review): presumably the drm_crtc_funcs.destroy hook — confirm at
 * the funcs-table initializer.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        drm_crtc_cleanup(crtc);
        kfree(intel_crtc);
}
10461
10462 /**
10463  * intel_wm_need_update - Check whether watermarks need updating
10464  * @plane: drm plane
10465  * @state: new plane state
10466  *
10467  * Check current plane state versus the new one to determine whether
10468  * watermarks need to be recalculated.
10469  *
10470  * Returns true or false.
10471  */
10472 static bool intel_wm_need_update(struct drm_plane *plane,
10473                                  struct drm_plane_state *state)
10474 {
10475         struct intel_plane_state *new = to_intel_plane_state(state);
10476         struct intel_plane_state *cur = to_intel_plane_state(plane->state);
10477
10478         /* Update watermarks on tiling or size changes. */
10479         if (new->base.visible != cur->base.visible)
10480                 return true;
10481
10482         if (!cur->base.fb || !new->base.fb)
10483                 return false;
10484
10485         if (cur->base.fb->modifier != new->base.fb->modifier ||
10486             cur->base.rotation != new->base.rotation ||
10487             drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
10488             drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
10489             drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
10490             drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
10491                 return true;
10492
10493         return false;
10494 }
10495
10496 static bool needs_scaling(const struct intel_plane_state *state)
10497 {
10498         int src_w = drm_rect_width(&state->base.src) >> 16;
10499         int src_h = drm_rect_height(&state->base.src) >> 16;
10500         int dst_w = drm_rect_width(&state->base.dst);
10501         int dst_h = drm_rect_height(&state->base.dst);
10502
10503         return (src_w != dst_w || src_h != dst_h);
10504 }
10505
/*
 * Derive the crtc-level update flags (watermark pre/post updates, cxsr
 * and lp-wm disables, frontbuffer bits) implied by a plane state change,
 * and run the gen9+ per-plane scaler check.  Returns 0 on success or a
 * negative errno from the scaler check.
 */
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
                                    struct drm_crtc_state *crtc_state,
                                    const struct intel_plane_state *old_plane_state,
                                    struct drm_plane_state *plane_state)
{
        struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
        struct drm_crtc *crtc = crtc_state->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *plane = to_intel_plane(plane_state->plane);
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        bool mode_changed = needs_modeset(crtc_state);
        bool was_crtc_enabled = old_crtc_state->base.active;
        bool is_crtc_enabled = crtc_state->active;
        bool turn_off, turn_on, visible, was_visible;
        struct drm_framebuffer *fb = plane_state->fb;
        int ret;

        /* gen9+ non-cursor planes may need a pipe scaler allocated/freed. */
        if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
                ret = skl_update_scaler_plane(
                        to_intel_crtc_state(crtc_state),
                        to_intel_plane_state(plane_state));
                if (ret)
                        return ret;
        }

        was_visible = old_plane_state->base.visible;
        visible = plane_state->visible;

        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;

        /*
         * Visibility is calculated as if the crtc was on, but
         * after scaler setup everything depends on it being off
         * when the crtc isn't active.
         *
         * FIXME this is wrong for watermarks. Watermarks should also
         * be computed as if the pipe would be active. Perhaps move
         * per-plane wm computation to the .check_plane() hook, and
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled) {
                plane_state->visible = visible = false;
                to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
        }

        /* Invisible before and after: nothing to account for. */
        if (!was_visible && !visible)
                return 0;

        if (fb != old_plane_state->base.fb)
                pipe_config->fb_changed = true;

        /* A full modeset implicitly turns the plane off and back on. */
        turn_off = was_visible && (!visible || mode_changed);
        turn_on = visible && (!was_visible || mode_changed);

        DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
                         intel_crtc->base.base.id, intel_crtc->base.name,
                         plane->base.base.id, plane->base.name,
                         fb ? fb->base.id : -1);

        DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
                         plane->base.base.id, plane->base.name,
                         was_visible, visible,
                         turn_off, turn_on, mode_changed);

        if (turn_on) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_pre = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (turn_off) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_post = true;

                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (intel_wm_need_update(&plane->base, plane_state)) {
                if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        pipe_config->update_wm_pre = true;
                        pipe_config->update_wm_post = true;
                }
        }

        if (visible || was_visible)
                pipe_config->fb_bits |= plane->frontbuffer_bit;

        /*
         * WaCxSRDisabledForSpriteScaling:ivb
         *
         * cstate->update_wm was already set above, so this flag will
         * take effect when we commit and program watermarks.
         */
        if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
            needs_scaling(to_intel_plane_state(plane_state)) &&
            !needs_scaling(old_plane_state))
                pipe_config->disable_lp_wm = true;

        return 0;
}
10610
10611 static bool encoders_cloneable(const struct intel_encoder *a,
10612                                const struct intel_encoder *b)
10613 {
10614         /* masks could be asymmetric, so check both ways */
10615         return a == b || (a->cloneable & (1 << b->type) &&
10616                           b->cloneable & (1 << a->type));
10617 }
10618
10619 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
10620                                          struct intel_crtc *crtc,
10621                                          struct intel_encoder *encoder)
10622 {
10623         struct intel_encoder *source_encoder;
10624         struct drm_connector *connector;
10625         struct drm_connector_state *connector_state;
10626         int i;
10627
10628         for_each_new_connector_in_state(state, connector, connector_state, i) {
10629                 if (connector_state->crtc != &crtc->base)
10630                         continue;
10631
10632                 source_encoder =
10633                         to_intel_encoder(connector_state->best_encoder);
10634                 if (!encoders_cloneable(encoder, source_encoder))
10635                         return false;
10636         }
10637
10638         return true;
10639 }
10640
/*
 * drm_crtc_helper_funcs.atomic_check hook (see intel_helper_funcs):
 * validate the new crtc state — compute clocks, pipe/intermediate
 * watermarks, gen9+ scalers, and IPS.  Returns 0 or a negative errno.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                                   struct drm_crtc_state *crtc_state)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc_state);
        struct drm_atomic_state *state = crtc_state->state;
        int ret;
        bool mode_changed = needs_modeset(crtc_state);

        /* A crtc being turned off needs its watermarks updated afterwards. */
        if (mode_changed && !crtc_state->active)
                pipe_config->update_wm_post = true;

        if (mode_changed && crtc_state->enable &&
            dev_priv->display.crtc_compute_clock &&
            !WARN_ON(pipe_config->shared_dpll)) {
                ret = dev_priv->display.crtc_compute_clock(intel_crtc,
                                                           pipe_config);
                if (ret)
                        return ret;
        }

        if (crtc_state->color_mgmt_changed) {
                ret = intel_color_check(crtc, crtc_state);
                if (ret)
                        return ret;

                /*
                 * Changing color management on Intel hardware is
                 * handled as part of planes update.
                 */
                crtc_state->planes_changed = true;
        }

        ret = 0;
        if (dev_priv->display.compute_pipe_wm) {
                ret = dev_priv->display.compute_pipe_wm(pipe_config);
                if (ret) {
                        DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
                        return ret;
                }
        }

        if (dev_priv->display.compute_intermediate_wm &&
            !to_intel_atomic_state(state)->skip_intermediate_wm) {
                if (WARN_ON(!dev_priv->display.compute_pipe_wm))
                        return 0;

                /*
                 * Calculate 'intermediate' watermarks that satisfy both the
                 * old state and the new state.  We can program these
                 * immediately.
                 */
                ret = dev_priv->display.compute_intermediate_wm(dev,
                                                                intel_crtc,
                                                                pipe_config);
                if (ret) {
                        DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
                        return ret;
                }
        } else if (dev_priv->display.compute_intermediate_wm) {
                /* Intermediate wm skipped: reuse the optimal values on ilk-style hw. */
                if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
                        pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
        }

        if (INTEL_GEN(dev_priv) >= 9) {
                if (mode_changed)
                        ret = skl_update_scaler_crtc(pipe_config);

                if (!ret)
                        ret = skl_check_pipe_max_pixel_rate(intel_crtc,
                                                            pipe_config);
                if (!ret)
                        ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
                                                         pipe_config);
        }

        if (HAS_IPS(dev_priv))
                pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);

        return ret;
}
10725
/*
 * CRTC helper vtable: atomic_check validates new crtc state
 * (intel_crtc_atomic_check above); begin/flush presumably bracket the
 * plane register updates during commit — their bodies are elsewhere.
 */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
        .atomic_begin = intel_begin_crtc_commit,
        .atomic_flush = intel_finish_crtc_commit,
        .atomic_check = intel_crtc_atomic_check,
};
10731
/*
 * Sync each connector's atomic state (best_encoder/crtc) with its legacy
 * encoder link, fixing up the connector reference counts to match.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
        struct intel_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
                /* Drop the reference held for the previous crtc binding. */
                if (connector->base.state->crtc)
                        drm_connector_put(&connector->base);

                if (connector->base.encoder) {
                        connector->base.state->best_encoder =
                                connector->base.encoder;
                        connector->base.state->crtc =
                                connector->base.encoder->crtc;

                        /* Re-bound to a crtc: take a fresh reference. */
                        drm_connector_get(&connector->base);
                } else {
                        connector->base.state->best_encoder = NULL;
                        connector->base.state->crtc = NULL;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
}
10756
10757 static void
10758 connected_sink_compute_bpp(struct intel_connector *connector,
10759                            struct intel_crtc_state *pipe_config)
10760 {
10761         const struct drm_display_info *info = &connector->base.display_info;
10762         int bpp = pipe_config->pipe_bpp;
10763
10764         DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
10765                       connector->base.base.id,
10766                       connector->base.name);
10767
10768         /* Don't use an invalid EDID bpc value */
10769         if (info->bpc != 0 && info->bpc * 3 < bpp) {
10770                 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
10771                               bpp, info->bpc * 3);
10772                 pipe_config->pipe_bpp = info->bpc * 3;
10773         }
10774
10775         /* Clamp bpp to 8 on screens without EDID 1.4 */
10776         if (info->bpc == 0 && bpp > 24) {
10777                 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
10778                               bpp);
10779                 pipe_config->pipe_bpp = 24;
10780         }
10781 }
10782
/*
 * Pick the platform's maximum pipe bpp as a baseline, store it in
 * pipe_config->pipe_bpp, then clamp pipe_bpp against every connected
 * sink on this crtc in the new atomic state.
 *
 * Returns the unclamped baseline bpp; the possibly clamped value is
 * left in pipe_config->pipe_bpp.
 * NOTE(review): no negative value is ever returned here, although the
 * caller checks for one — presumably kept for historical reasons.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	/* Platform maximum: 10 bpc on g4x/vlv/chv, 12 bpc on gen5+, 8 bpc otherwise. */
	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)))
		bpp = 10*3;
	else if (INTEL_GEN(dev_priv) >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;


	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}
10817
10818 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
10819 {
10820         DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
10821                         "type: 0x%x flags: 0x%x\n",
10822                 mode->crtc_clock,
10823                 mode->crtc_hdisplay, mode->crtc_hsync_start,
10824                 mode->crtc_hsync_end, mode->crtc_htotal,
10825                 mode->crtc_vdisplay, mode->crtc_vsync_start,
10826                 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
10827 }
10828
10829 static inline void
10830 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
10831                       unsigned int lane_count, struct intel_link_m_n *m_n)
10832 {
10833         DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
10834                       id, lane_count,
10835                       m_n->gmch_m, m_n->gmch_n,
10836                       m_n->link_m, m_n->link_n, m_n->tu);
10837 }
10838
/* Expands to a designated initializer, e.g. [INTEL_OUTPUT_HDMI] = "HDMI". */
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x

/* Name of each INTEL_OUTPUT_* value, indexed by the enum value itself. */
static const char * const output_type_str[] = {
	OUTPUT_TYPE(UNUSED),
	OUTPUT_TYPE(ANALOG),
	OUTPUT_TYPE(DVO),
	OUTPUT_TYPE(SDVO),
	OUTPUT_TYPE(LVDS),
	OUTPUT_TYPE(TVOUT),
	OUTPUT_TYPE(HDMI),
	OUTPUT_TYPE(DP),
	OUTPUT_TYPE(EDP),
	OUTPUT_TYPE(DSI),
	OUTPUT_TYPE(DDI),
	OUTPUT_TYPE(DP_MST),
};

#undef OUTPUT_TYPE
10857
10858 static void snprintf_output_types(char *buf, size_t len,
10859                                   unsigned int output_types)
10860 {
10861         char *str = buf;
10862         int i;
10863
10864         str[0] = '\0';
10865
10866         for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
10867                 int r;
10868
10869                 if ((output_types & BIT(i)) == 0)
10870                         continue;
10871
10872                 r = snprintf(str, len, "%s%s",
10873                              str != buf ? "," : "", output_type_str[i]);
10874                 if (r >= len)
10875                         break;
10876                 str += r;
10877                 len -= r;
10878
10879                 output_types &= ~BIT(i);
10880         }
10881
10882         WARN_ON_ONCE(output_types != 0);
10883 }
10884
/*
 * Dump a pipe config and the state of every plane on the crtc's pipe
 * to the debug log. @context is a free-form tag describing why the
 * dump was taken.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;
	char buf[64];

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n",
		      crtc->base.base.id, crtc->base.name, context);

	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
	DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
		      buf, pipe_config->output_types);

	DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
		      transcoder_name(pipe_config->cpu_transcoder),
		      pipe_config->pipe_bpp, pipe_config->dither);

	if (pipe_config->has_pch_encoder)
		intel_dump_m_n_config(pipe_config, "fdi",
				      pipe_config->fdi_lanes,
				      &pipe_config->fdi_m_n);

	if (pipe_config->ycbcr420)
		DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");

	if (intel_crtc_has_dp_encoder(pipe_config)) {
		intel_dump_m_n_config(pipe_config, "dp m_n",
				pipe_config->lane_count, &pipe_config->dp_m_n);
		/* m2_n2 only matters with DRRS (second refresh rate) enabled. */
		if (pipe_config->has_drrs)
			intel_dump_m_n_config(pipe_config, "dp m2_n2",
					      pipe_config->lane_count,
					      &pipe_config->dp_m2_n2);
	}

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio, pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
		      pipe_config->port_clock,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h,
		      pipe_config->pixel_rate);

	/* Pipe scalers only exist on gen9+. */
	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
			      crtc->num_scalers,
			      pipe_config->scaler_state.scaler_users,
			      pipe_config->scaler_state.scaler_id);

	/* GMCH platforms use the gmch pfit; everything else the pch pfit. */
	if (HAS_GMCH_DISPLAY(dev_priv))
		DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
			      pipe_config->gmch_pfit.control,
			      pipe_config->gmch_pfit.pgm_ratios,
			      pipe_config->gmch_pfit.lvds_border_bits);
	else
		DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
			      pipe_config->pch_pfit.pos,
			      pipe_config->pch_pfit.size,
			      enableddisabled(pipe_config->pch_pfit.enabled));

	DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
		      pipe_config->ips_enabled, pipe_config->double_wide);

	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);

	/* Dump the committed state of every plane assigned to this pipe. */
	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		struct drm_format_name_buf format_name;
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n",
			      plane->base.id, plane->name,
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->format->format, &format_name));
		if (INTEL_GEN(dev_priv) >= 9)
			/* src rect is in 16.16 fixed point, dst in whole pixels. */
			DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
				      state->scaler_id,
				      state->base.src.x1 >> 16,
				      state->base.src.y1 >> 16,
				      drm_rect_width(&state->base.src) >> 16,
				      drm_rect_height(&state->base.src) >> 16,
				      state->base.dst.x1, state->base.dst.y1,
				      drm_rect_width(&state->base.dst),
				      drm_rect_height(&state->base.dst));
	}
}
10991
/*
 * Verify that no digital port is driven by more than one encoder in
 * the resulting state, and that MST and SST/HDMI are not mixed on the
 * same port. Returns true if the configuration is valid.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	unsigned int used_ports = 0;
	unsigned int used_mst_ports = 0;
	bool ret = true;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Use the new state if this connector is part of the update. */
		connector_state = drm_atomic_get_new_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_DDI:
			if (WARN_ON(!HAS_DDI(to_i915(dev))))
				break;
			/* else: fall through */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << encoder->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				ret = false;

			used_ports |= port_mask;
			break;
		case INTEL_OUTPUT_DP_MST:
			used_mst_ports |=
				1 << encoder->port;
			break;
		default:
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* can't mix MST and SST/HDMI on the same port */
	if (used_ports & used_mst_ports)
		return false;

	return ret;
}
11055
/*
 * Zero the i915-specific part of @crtc_state (everything past the
 * drm_crtc_state base) while preserving the fields that must survive
 * a config recompute: scaler state, shared DPLL selection and its hw
 * state, pch pfit force_thru, the IPS debug override, and - on
 * g4x/vlv/chv - the watermark state.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv =
		to_i915(crtc_state->base.crtc->dev);
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	struct intel_crtc_wm_state wm_state;
	bool force_thru, ips_force_disable;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Save the fields that must survive the memset below. */
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	force_thru = crtc_state->pch_pfit.force_thru;
	ips_force_disable = crtc_state->ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		wm_state = crtc_state->wm;

	/* Keep base drm_crtc_state intact, only clear our extended struct */
	BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
	memset(&crtc_state->base + 1, 0,
	       sizeof(*crtc_state) - sizeof(crtc_state->base));

	/* Restore the preserved fields. */
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->pch_pfit.force_thru = force_thru;
	crtc_state->ips_force_disable = ips_force_disable;
	if (IS_G4X(dev_priv) ||
	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		crtc_state->wm = wm_state;
}
11095
/*
 * Compute the full pipe configuration for @crtc: reset the derived
 * state, pick a baseline bpp, let every encoder on the crtc adjust
 * the mode, then run the crtc fixup - retrying the encoder pass once
 * if the crtc asks for it (RETRY).
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	/* Default: transcoder matches the pipe; encoders may override. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_mode_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		if (encoder->compute_output_type)
			pipe_config->output_types |=
				BIT(encoder->compute_output_type(encoder, pipe_config,
								 connector_state));
		else
			pipe_config->output_types |= BIT(encoder->type);
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* Run the encoder pass at most one extra time on RETRY. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels and when its not a compliance
	 * test requesting 6bpc video pattern.
	 */
	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
		!pipe_config->dither_force_disable;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
11226
/*
 * Compare two clocks with roughly 5% tolerance. Zero only matches
 * zero exactly; everything else is compared relative to the sum of
 * the two clocks.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);

	return ((delta + clock1 + clock2) * 100) / (clock1 + clock2) < 105;
}
11244
11245 static bool
11246 intel_compare_m_n(unsigned int m, unsigned int n,
11247                   unsigned int m2, unsigned int n2,
11248                   bool exact)
11249 {
11250         if (m == m2 && n == n2)
11251                 return true;
11252
11253         if (exact || !m || !n || !m2 || !n2)
11254                 return false;
11255
11256         BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11257
11258         if (n > n2) {
11259                 while (n > n2) {
11260                         m2 <<= 1;
11261                         n2 <<= 1;
11262                 }
11263         } else if (n < n2) {
11264                 while (n < n2) {
11265                         m <<= 1;
11266                         n <<= 1;
11267                 }
11268         }
11269
11270         if (n != n2)
11271                 return false;
11272
11273         return intel_fuzzy_clock_check(m, m2);
11274 }
11275
11276 static bool
11277 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11278                        struct intel_link_m_n *m2_n2,
11279                        bool adjust)
11280 {
11281         if (m_n->tu == m2_n2->tu &&
11282             intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
11283                               m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
11284             intel_compare_m_n(m_n->link_m, m_n->link_n,
11285                               m2_n2->link_m, m2_n2->link_n, !adjust)) {
11286                 if (adjust)
11287                         *m2_n2 = *m_n;
11288
11289                 return true;
11290         }
11291
11292         return false;
11293 }
11294
/*
 * Report a pipe config mismatch for field @name. With @adjust set
 * (fastset fixup path, mismatches may be tolerated) it is a KMS debug
 * message, otherwise a full error. The caller's printf-style format
 * is forwarded via the kernel's %pV / struct va_format mechanism.
 */
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;

	if (adjust)
		drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
	else
		drm_err("mismatch in %s %pV", name, &vaf);

	va_end(args);
}
11312
11313 static bool
11314 intel_pipe_config_compare(struct drm_i915_private *dev_priv,
11315                           struct intel_crtc_state *current_config,
11316                           struct intel_crtc_state *pipe_config,
11317                           bool adjust)
11318 {
11319         bool ret = true;
11320         bool fixup_inherited = adjust &&
11321                 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
11322                 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
11323
11324 #define PIPE_CONF_CHECK_X(name) do { \
11325         if (current_config->name != pipe_config->name) { \
11326                 pipe_config_err(adjust, __stringify(name), \
11327                           "(expected 0x%08x, found 0x%08x)\n", \
11328                           current_config->name, \
11329                           pipe_config->name); \
11330                 ret = false; \
11331         } \
11332 } while (0)
11333
11334 #define PIPE_CONF_CHECK_I(name) do { \
11335         if (current_config->name != pipe_config->name) { \
11336                 pipe_config_err(adjust, __stringify(name), \
11337                           "(expected %i, found %i)\n", \
11338                           current_config->name, \
11339                           pipe_config->name); \
11340                 ret = false; \
11341         } \
11342 } while (0)
11343
11344 #define PIPE_CONF_CHECK_BOOL(name) do { \
11345         if (current_config->name != pipe_config->name) { \
11346                 pipe_config_err(adjust, __stringify(name), \
11347                           "(expected %s, found %s)\n", \
11348                           yesno(current_config->name), \
11349                           yesno(pipe_config->name)); \
11350                 ret = false; \
11351         } \
11352 } while (0)
11353
11354 /*
11355  * Checks state where we only read out the enabling, but not the entire
11356  * state itself (like full infoframes or ELD for audio). These states
11357  * require a full modeset on bootup to fix up.
11358  */
11359 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
11360         if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
11361                 PIPE_CONF_CHECK_BOOL(name); \
11362         } else { \
11363                 pipe_config_err(adjust, __stringify(name), \
11364                           "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
11365                           yesno(current_config->name), \
11366                           yesno(pipe_config->name)); \
11367                 ret = false; \
11368         } \
11369 } while (0)
11370
11371 #define PIPE_CONF_CHECK_P(name) do { \
11372         if (current_config->name != pipe_config->name) { \
11373                 pipe_config_err(adjust, __stringify(name), \
11374                           "(expected %p, found %p)\n", \
11375                           current_config->name, \
11376                           pipe_config->name); \
11377                 ret = false; \
11378         } \
11379 } while (0)
11380
11381 #define PIPE_CONF_CHECK_M_N(name) do { \
11382         if (!intel_compare_link_m_n(&current_config->name, \
11383                                     &pipe_config->name,\
11384                                     adjust)) { \
11385                 pipe_config_err(adjust, __stringify(name), \
11386                           "(expected tu %i gmch %i/%i link %i/%i, " \
11387                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11388                           current_config->name.tu, \
11389                           current_config->name.gmch_m, \
11390                           current_config->name.gmch_n, \
11391                           current_config->name.link_m, \
11392                           current_config->name.link_n, \
11393                           pipe_config->name.tu, \
11394                           pipe_config->name.gmch_m, \
11395                           pipe_config->name.gmch_n, \
11396                           pipe_config->name.link_m, \
11397                           pipe_config->name.link_n); \
11398                 ret = false; \
11399         } \
11400 } while (0)
11401
11402 /* This is required for BDW+ where there is only one set of registers for
11403  * switching between high and low RR.
11404  * This macro can be used whenever a comparison has to be made between one
11405  * hw state and multiple sw state variables.
11406  */
11407 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
11408         if (!intel_compare_link_m_n(&current_config->name, \
11409                                     &pipe_config->name, adjust) && \
11410             !intel_compare_link_m_n(&current_config->alt_name, \
11411                                     &pipe_config->name, adjust)) { \
11412                 pipe_config_err(adjust, __stringify(name), \
11413                           "(expected tu %i gmch %i/%i link %i/%i, " \
11414                           "or tu %i gmch %i/%i link %i/%i, " \
11415                           "found tu %i, gmch %i/%i link %i/%i)\n", \
11416                           current_config->name.tu, \
11417                           current_config->name.gmch_m, \
11418                           current_config->name.gmch_n, \
11419                           current_config->name.link_m, \
11420                           current_config->name.link_n, \
11421                           current_config->alt_name.tu, \
11422                           current_config->alt_name.gmch_m, \
11423                           current_config->alt_name.gmch_n, \
11424                           current_config->alt_name.link_m, \
11425                           current_config->alt_name.link_n, \
11426                           pipe_config->name.tu, \
11427                           pipe_config->name.gmch_m, \
11428                           pipe_config->name.gmch_n, \
11429                           pipe_config->name.link_m, \
11430                           pipe_config->name.link_n); \
11431                 ret = false; \
11432         } \
11433 } while (0)
11434
11435 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
11436         if ((current_config->name ^ pipe_config->name) & (mask)) { \
11437                 pipe_config_err(adjust, __stringify(name), \
11438                           "(%x) (expected %i, found %i)\n", \
11439                           (mask), \
11440                           current_config->name & (mask), \
11441                           pipe_config->name & (mask)); \
11442                 ret = false; \
11443         } \
11444 } while (0)
11445
11446 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
11447         if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
11448                 pipe_config_err(adjust, __stringify(name), \
11449                           "(expected %i, found %i)\n", \
11450                           current_config->name, \
11451                           pipe_config->name); \
11452                 ret = false; \
11453         } \
11454 } while (0)
11455
11456 #define PIPE_CONF_QUIRK(quirk)  \
11457         ((current_config->quirks | pipe_config->quirks) & (quirk))
11458
11459         PIPE_CONF_CHECK_I(cpu_transcoder);
11460
11461         PIPE_CONF_CHECK_BOOL(has_pch_encoder);
11462         PIPE_CONF_CHECK_I(fdi_lanes);
11463         PIPE_CONF_CHECK_M_N(fdi_m_n);
11464
11465         PIPE_CONF_CHECK_I(lane_count);
11466         PIPE_CONF_CHECK_X(lane_lat_optim_mask);
11467
11468         if (INTEL_GEN(dev_priv) < 8) {
11469                 PIPE_CONF_CHECK_M_N(dp_m_n);
11470
11471                 if (current_config->has_drrs)
11472                         PIPE_CONF_CHECK_M_N(dp_m2_n2);
11473         } else
11474                 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
11475
11476         PIPE_CONF_CHECK_X(output_types);
11477
11478         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
11479         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
11480         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
11481         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
11482         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
11483         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
11484
11485         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
11486         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
11487         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
11488         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
11489         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
11490         PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
11491
11492         PIPE_CONF_CHECK_I(pixel_multiplier);
11493         PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
11494         if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
11495             IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
11496                 PIPE_CONF_CHECK_BOOL(limited_color_range);
11497
11498         PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
11499         PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
11500         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
11501         PIPE_CONF_CHECK_BOOL(ycbcr420);
11502
11503         PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
11504
11505         PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11506                               DRM_MODE_FLAG_INTERLACE);
11507
11508         if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
11509                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11510                                       DRM_MODE_FLAG_PHSYNC);
11511                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11512                                       DRM_MODE_FLAG_NHSYNC);
11513                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11514                                       DRM_MODE_FLAG_PVSYNC);
11515                 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
11516                                       DRM_MODE_FLAG_NVSYNC);
11517         }
11518
11519         PIPE_CONF_CHECK_X(gmch_pfit.control);
11520         /* pfit ratios are autocomputed by the hw on gen4+ */
11521         if (INTEL_GEN(dev_priv) < 4)
11522                 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
11523         PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
11524
11525         if (!adjust) {
11526                 PIPE_CONF_CHECK_I(pipe_src_w);
11527                 PIPE_CONF_CHECK_I(pipe_src_h);
11528
11529                 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
11530                 if (current_config->pch_pfit.enabled) {
11531                         PIPE_CONF_CHECK_X(pch_pfit.pos);
11532                         PIPE_CONF_CHECK_X(pch_pfit.size);
11533                 }
11534
11535                 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
11536                 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
11537         }
11538
11539         PIPE_CONF_CHECK_BOOL(double_wide);
11540
11541         PIPE_CONF_CHECK_P(shared_dpll);
11542         PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
11543         PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
11544         PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
11545         PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
11546         PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
11547         PIPE_CONF_CHECK_X(dpll_hw_state.spll);
11548         PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
11549         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
11550         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
11551         PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
11552         PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
11553         PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
11554         PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
11555         PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
11556         PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
11557         PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
11558         PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
11559         PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
11560         PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
11561         PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
11562         PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
11563         PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
11564         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
11565         PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
11566         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
11567         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
11568         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
11569         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
11570         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
11571         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
11572         PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
11573
11574         PIPE_CONF_CHECK_X(dsi_pll.ctrl);
11575         PIPE_CONF_CHECK_X(dsi_pll.div);
11576
11577         if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
11578                 PIPE_CONF_CHECK_I(pipe_bpp);
11579
11580         PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
11581         PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
11582
11583         PIPE_CONF_CHECK_I(min_voltage_level);
11584
11585 #undef PIPE_CONF_CHECK_X
11586 #undef PIPE_CONF_CHECK_I
11587 #undef PIPE_CONF_CHECK_BOOL
11588 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
11589 #undef PIPE_CONF_CHECK_P
11590 #undef PIPE_CONF_CHECK_FLAGS
11591 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
11592 #undef PIPE_CONF_QUIRK
11593
11594         return ret;
11595 }
11596
11597 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
11598                                            const struct intel_crtc_state *pipe_config)
11599 {
11600         if (pipe_config->has_pch_encoder) {
11601                 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
11602                                                             &pipe_config->fdi_m_n);
11603                 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
11604
11605                 /*
11606                  * FDI already provided one idea for the dotclock.
11607                  * Yell if the encoder disagrees.
11608                  */
11609                 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
11610                      "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
11611                      fdi_dotclock, dotclock);
11612         }
11613 }
11614
11615 static void verify_wm_state(struct drm_crtc *crtc,
11616                             struct drm_crtc_state *new_state)
11617 {
11618         struct drm_i915_private *dev_priv = to_i915(crtc->dev);
11619         struct skl_ddb_allocation hw_ddb, *sw_ddb;
11620         struct skl_pipe_wm hw_wm, *sw_wm;
11621         struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
11622         struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
11623         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11624         const enum pipe pipe = intel_crtc->pipe;
11625         int plane, level, max_level = ilk_wm_max_level(dev_priv);
11626
11627         if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
11628                 return;
11629
11630         skl_pipe_wm_get_hw_state(crtc, &hw_wm);
11631         sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
11632
11633         skl_ddb_get_hw_state(dev_priv, &hw_ddb);
11634         sw_ddb = &dev_priv->wm.skl_hw.ddb;
11635
11636         if (INTEL_GEN(dev_priv) >= 11)
11637                 if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
11638                         DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
11639                                   sw_ddb->enabled_slices,
11640                                   hw_ddb.enabled_slices);
11641         /* planes */
11642         for_each_universal_plane(dev_priv, pipe, plane) {
11643                 hw_plane_wm = &hw_wm.planes[plane];
11644                 sw_plane_wm = &sw_wm->planes[plane];
11645
11646                 /* Watermarks */
11647                 for (level = 0; level <= max_level; level++) {
11648                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
11649                                                 &sw_plane_wm->wm[level]))
11650                                 continue;
11651
11652                         DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11653                                   pipe_name(pipe), plane + 1, level,
11654                                   sw_plane_wm->wm[level].plane_en,
11655                                   sw_plane_wm->wm[level].plane_res_b,
11656                                   sw_plane_wm->wm[level].plane_res_l,
11657                                   hw_plane_wm->wm[level].plane_en,
11658                                   hw_plane_wm->wm[level].plane_res_b,
11659                                   hw_plane_wm->wm[level].plane_res_l);
11660                 }
11661
11662                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
11663                                          &sw_plane_wm->trans_wm)) {
11664                         DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11665                                   pipe_name(pipe), plane + 1,
11666                                   sw_plane_wm->trans_wm.plane_en,
11667                                   sw_plane_wm->trans_wm.plane_res_b,
11668                                   sw_plane_wm->trans_wm.plane_res_l,
11669                                   hw_plane_wm->trans_wm.plane_en,
11670                                   hw_plane_wm->trans_wm.plane_res_b,
11671                                   hw_plane_wm->trans_wm.plane_res_l);
11672                 }
11673
11674                 /* DDB */
11675                 hw_ddb_entry = &hw_ddb.plane[pipe][plane];
11676                 sw_ddb_entry = &sw_ddb->plane[pipe][plane];
11677
11678                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
11679                         DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
11680                                   pipe_name(pipe), plane + 1,
11681                                   sw_ddb_entry->start, sw_ddb_entry->end,
11682                                   hw_ddb_entry->start, hw_ddb_entry->end);
11683                 }
11684         }
11685
11686         /*
11687          * cursor
11688          * If the cursor plane isn't active, we may not have updated it's ddb
11689          * allocation. In that case since the ddb allocation will be updated
11690          * once the plane becomes visible, we can skip this check
11691          */
11692         if (1) {
11693                 hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
11694                 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
11695
11696                 /* Watermarks */
11697                 for (level = 0; level <= max_level; level++) {
11698                         if (skl_wm_level_equals(&hw_plane_wm->wm[level],
11699                                                 &sw_plane_wm->wm[level]))
11700                                 continue;
11701
11702                         DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11703                                   pipe_name(pipe), level,
11704                                   sw_plane_wm->wm[level].plane_en,
11705                                   sw_plane_wm->wm[level].plane_res_b,
11706                                   sw_plane_wm->wm[level].plane_res_l,
11707                                   hw_plane_wm->wm[level].plane_en,
11708                                   hw_plane_wm->wm[level].plane_res_b,
11709                                   hw_plane_wm->wm[level].plane_res_l);
11710                 }
11711
11712                 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
11713                                          &sw_plane_wm->trans_wm)) {
11714                         DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
11715                                   pipe_name(pipe),
11716                                   sw_plane_wm->trans_wm.plane_en,
11717                                   sw_plane_wm->trans_wm.plane_res_b,
11718                                   sw_plane_wm->trans_wm.plane_res_l,
11719                                   hw_plane_wm->trans_wm.plane_en,
11720                                   hw_plane_wm->trans_wm.plane_res_b,
11721                                   hw_plane_wm->trans_wm.plane_res_l);
11722                 }
11723
11724                 /* DDB */
11725                 hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
11726                 sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
11727
11728                 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
11729                         DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
11730                                   pipe_name(pipe),
11731                                   sw_ddb_entry->start, sw_ddb_entry->end,
11732                                   hw_ddb_entry->start, hw_ddb_entry->end);
11733                 }
11734         }
11735 }
11736
11737 static void
11738 verify_connector_state(struct drm_device *dev,
11739                        struct drm_atomic_state *state,
11740                        struct drm_crtc *crtc)
11741 {
11742         struct drm_connector *connector;
11743         struct drm_connector_state *new_conn_state;
11744         int i;
11745
11746         for_each_new_connector_in_state(state, connector, new_conn_state, i) {
11747                 struct drm_encoder *encoder = connector->encoder;
11748                 struct drm_crtc_state *crtc_state = NULL;
11749
11750                 if (new_conn_state->crtc != crtc)
11751                         continue;
11752
11753                 if (crtc)
11754                         crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
11755
11756                 intel_connector_verify_state(crtc_state, new_conn_state);
11757
11758                 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
11759                      "connector's atomic encoder doesn't match legacy encoder\n");
11760         }
11761 }
11762
11763 static void
11764 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
11765 {
11766         struct intel_encoder *encoder;
11767         struct drm_connector *connector;
11768         struct drm_connector_state *old_conn_state, *new_conn_state;
11769         int i;
11770
11771         for_each_intel_encoder(dev, encoder) {
11772                 bool enabled = false, found = false;
11773                 enum pipe pipe;
11774
11775                 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
11776                               encoder->base.base.id,
11777                               encoder->base.name);
11778
11779                 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
11780                                                    new_conn_state, i) {
11781                         if (old_conn_state->best_encoder == &encoder->base)
11782                                 found = true;
11783
11784                         if (new_conn_state->best_encoder != &encoder->base)
11785                                 continue;
11786                         found = enabled = true;
11787
11788                         I915_STATE_WARN(new_conn_state->crtc !=
11789                                         encoder->base.crtc,
11790                              "connector's crtc doesn't match encoder crtc\n");
11791                 }
11792
11793                 if (!found)
11794                         continue;
11795
11796                 I915_STATE_WARN(!!encoder->base.crtc != enabled,
11797                      "encoder's enabled state mismatch "
11798                      "(expected %i, found %i)\n",
11799                      !!encoder->base.crtc, enabled);
11800
11801                 if (!encoder->base.crtc) {
11802                         bool active;
11803
11804                         active = encoder->get_hw_state(encoder, &pipe);
11805                         I915_STATE_WARN(active,
11806                              "encoder detached but still enabled on pipe %c.\n",
11807                              pipe_name(pipe));
11808                 }
11809         }
11810 }
11811
/*
 * Read the crtc's hw state back and compare it against the committed sw
 * state. Note: the old crtc state's storage is destroyed and reused in
 * place as scratch space for the hw readout, so @old_crtc_state must not
 * be used by the caller afterwards.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/*
	 * Free the old state's internals, then recycle its memory as a
	 * zeroed intel_crtc_state to hold the hw readout below.
	 */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	/* Each encoder on the crtc must agree on active state and pipe. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* Let active encoders fill in their part of the hw state. */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	intel_crtc_compute_pixel_rate(pipe_config);

	/* Inactive crtc: nothing further to compare. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(new_crtc_state);
	if (!intel_pipe_config_compare(dev_priv, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
11881
11882 static void
11883 intel_verify_planes(struct intel_atomic_state *state)
11884 {
11885         struct intel_plane *plane;
11886         const struct intel_plane_state *plane_state;
11887         int i;
11888
11889         for_each_new_intel_plane_in_state(state, plane,
11890                                           plane_state, i)
11891                 assert_plane(plane, plane_state->base.visible);
11892 }
11893
/*
 * Cross-check one shared dpll's sw tracking (on/active_mask/crtc_mask and
 * cached hw state) against the actual hardware. With @crtc == NULL only the
 * pll-global bookkeeping is verified; otherwise @crtc's membership in the
 * pll's active/enabled masks is checked against @new_state->active.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned int crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->info->name);

	active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls have no meaningful on/off tracking to verify. */
	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	/* No crtc: only check active users against held references. */
	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->state.crtc_mask);

		return;
	}

	crtc_mask = drm_crtc_mask(crtc);

	/* Active crtcs must be in active_mask, inactive ones must not. */
	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->state.crtc_mask);

	/* Cached sw hw-state must match what the pll actually reads back. */
	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
11948
11949 static void
11950 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
11951                          struct drm_crtc_state *old_crtc_state,
11952                          struct drm_crtc_state *new_crtc_state)
11953 {
11954         struct drm_i915_private *dev_priv = to_i915(dev);
11955         struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
11956         struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
11957
11958         if (new_state->shared_dpll)
11959                 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
11960
11961         if (old_state->shared_dpll &&
11962             old_state->shared_dpll != new_state->shared_dpll) {
11963                 unsigned int crtc_mask = drm_crtc_mask(crtc);
11964                 struct intel_shared_dpll *pll = old_state->shared_dpll;
11965
11966                 I915_STATE_WARN(pll->active_mask & crtc_mask,
11967                                 "pll active mismatch (didn't expect pipe %c in active mask)\n",
11968                                 pipe_name(drm_crtc_index(crtc)));
11969                 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
11970                                 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
11971                                 pipe_name(drm_crtc_index(crtc)));
11972         }
11973 }
11974
11975 static void
11976 intel_modeset_verify_crtc(struct drm_crtc *crtc,
11977                           struct drm_atomic_state *state,
11978                           struct drm_crtc_state *old_state,
11979                           struct drm_crtc_state *new_state)
11980 {
11981         if (!needs_modeset(new_state) &&
11982             !to_intel_crtc_state(new_state)->update_pipe)
11983                 return;
11984
11985         verify_wm_state(crtc, new_state);
11986         verify_connector_state(crtc->dev, state, crtc);
11987         verify_crtc_state(crtc, old_state, new_state);
11988         verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
11989 }
11990
11991 static void
11992 verify_disabled_dpll_state(struct drm_device *dev)
11993 {
11994         struct drm_i915_private *dev_priv = to_i915(dev);
11995         int i;
11996
11997         for (i = 0; i < dev_priv->num_shared_dpll; i++)
11998                 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
11999 }
12000
/*
 * Verify the hw state that is not tied to any one crtc: all encoders,
 * connectors with no crtc, and every shared dpll's global bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	verify_encoder_state(dev, state);
	verify_connector_state(dev, state, NULL);
	verify_disabled_dpll_state(dev);
}
12009
12010 static void update_scanline_offset(struct intel_crtc *crtc)
12011 {
12012         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12013
12014         /*
12015          * The scanline counter increments at the leading edge of hsync.
12016          *
12017          * On most platforms it starts counting from vtotal-1 on the
12018          * first active line. That means the scanline counter value is
12019          * always one less than what we would expect. Ie. just after
12020          * start of vblank, which also occurs at start of hsync (on the
12021          * last active line), the scanline counter will read vblank_start-1.
12022          *
12023          * On gen2 the scanline counter starts counting from 1 instead
12024          * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12025          * to keep the value positive), instead of adding one.
12026          *
12027          * On HSW+ the behaviour of the scanline counter depends on the output
12028          * type. For DP ports it behaves like most other platforms, but on HDMI
12029          * there's an extra 1 line difference. So we need to add two instead of
12030          * one to the value.
12031          *
12032          * On VLV/CHV DSI the scanline counter would appear to increment
12033          * approx. 1/3 of a scanline before start of vblank. Unfortunately
12034          * that means we can't tell whether we're in vblank or not while
12035          * we're on that particular line. We must still set scanline_offset
12036          * to 1 so that the vblank timestamps come out correct when we query
12037          * the scanline counter from within the vblank interrupt handler.
12038          * However if queried just before the start of vblank we'll get an
12039          * answer that's slightly in the future.
12040          */
12041         if (IS_GEN2(dev_priv)) {
12042                 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12043                 int vtotal;
12044
12045                 vtotal = adjusted_mode->crtc_vtotal;
12046                 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12047                         vtotal /= 2;
12048
12049                 crtc->scanline_offset = vtotal - 1;
12050         } else if (HAS_DDI(dev_priv) &&
12051                    intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
12052                 crtc->scanline_offset = 2;
12053         } else
12054                 crtc->scanline_offset = 1;
12055 }
12056
12057 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12058 {
12059         struct drm_device *dev = state->dev;
12060         struct drm_i915_private *dev_priv = to_i915(dev);
12061         struct drm_crtc *crtc;
12062         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12063         int i;
12064
12065         if (!dev_priv->display.crtc_compute_clock)
12066                 return;
12067
12068         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12069                 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12070                 struct intel_shared_dpll *old_dpll =
12071                         to_intel_crtc_state(old_crtc_state)->shared_dpll;
12072
12073                 if (!needs_modeset(new_crtc_state))
12074                         continue;
12075
12076                 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
12077
12078                 if (!old_dpll)
12079                         continue;
12080
12081                 intel_release_shared_dpll(old_dpll, intel_crtc, state);
12082         }
12083 }
12084
12085 /*
12086  * This implements the workaround described in the "notes" section of the mode
12087  * set sequence documentation. When going from no pipes or single pipe to
12088  * multiple pipes, and planes are enabled after the pipe, we need to wait at
12089  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12090  */
/*
 * See the comment block above: when going from <=1 enabled pipe to more,
 * record on one crtc state which other pipe must run for >=2 vblanks
 * before this one may enable its planes. Returns 0 or a -errno from
 * acquiring crtc states.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* Second crtc being enabled — remember it and stop. */
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* Pulls the crtc into the state (may lock/fail). */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		/* Reset on all crtcs; set again below only where needed. */
		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/*
	 * Exactly one pipe already enabled: the first newly-enabled crtc
	 * waits on it. Otherwise, if two crtcs are being enabled together,
	 * the second waits on the first.
	 */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
12149
12150 static int intel_lock_all_pipes(struct drm_atomic_state *state)
12151 {
12152         struct drm_crtc *crtc;
12153
12154         /* Add all pipes to the state */
12155         for_each_crtc(state->dev, crtc) {
12156                 struct drm_crtc_state *crtc_state;
12157
12158                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12159                 if (IS_ERR(crtc_state))
12160                         return PTR_ERR(crtc_state);
12161         }
12162
12163         return 0;
12164 }
12165
12166 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12167 {
12168         struct drm_crtc *crtc;
12169
12170         /*
12171          * Add all pipes to the state, and force
12172          * a modeset on all the active ones.
12173          */
12174         for_each_crtc(state->dev, crtc) {
12175                 struct drm_crtc_state *crtc_state;
12176                 int ret;
12177
12178                 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12179                 if (IS_ERR(crtc_state))
12180                         return PTR_ERR(crtc_state);
12181
12182                 if (!crtc_state->active || needs_modeset(crtc_state))
12183                         continue;
12184
12185                 crtc_state->mode_changed = true;
12186
12187                 ret = drm_atomic_add_affected_connectors(state, crtc);
12188                 if (ret)
12189                         return ret;
12190
12191                 ret = drm_atomic_add_affected_planes(state, crtc);
12192                 if (ret)
12193                         return ret;
12194         }
12195
12196         return 0;
12197 }
12198
/*
 * Extra check/compute step for states that require a modeset on at least
 * one pipe: rejects conflicting digital port configurations, records the
 * new set of active CRTCs, recomputes cdclk (locking or modesetting all
 * pipes as needed), clears stale PLL state and applies the HSW planes
 * workaround. Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/* Start from the current device state, then apply this commit's changes. */
	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;
	intel_state->cdclk.logical = dev_priv->cdclk.logical;
	intel_state->cdclk.actual = dev_priv->cdclk.actual;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		/* Track pipes that are being turned on or off by this commit. */
		if (old_crtc_state->active != new_crtc_state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.  We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc.  For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/*
		 * Writes to dev_priv->cdclk.logical must protected by
		 * holding all the crtc locks, even if we don't end up
		 * touching the hardware
		 */
		if (intel_cdclk_changed(&dev_priv->cdclk.logical,
					&intel_state->cdclk.logical)) {
			ret = intel_lock_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		/* All pipes must be switched off while we change the cdclk. */
		if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
					      &intel_state->cdclk.actual)) {
			ret = intel_modeset_all_pipes(state);
			if (ret < 0)
				return ret;
		}

		DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
			      intel_state->cdclk.logical.cdclk,
			      intel_state->cdclk.actual.cdclk);
		DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
			      intel_state->cdclk.logical.voltage_level,
			      intel_state->cdclk.actual.voltage_level);
	} else {
		/* No cdclk hook: keep the current logical cdclk unchanged. */
		to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
12276
12277 /*
12278  * Handle calculation of various watermark data at the end of the atomic check
12279  * phase.  The code here should be run after the per-crtc and per-plane 'check'
12280  * handlers to ensure that all derived state has been updated.
12281  */
12282 static int calc_watermark_data(struct drm_atomic_state *state)
12283 {
12284         struct drm_device *dev = state->dev;
12285         struct drm_i915_private *dev_priv = to_i915(dev);
12286
12287         /* Is there platform-specific watermark information to calculate? */
12288         if (dev_priv->display.compute_global_watermarks)
12289                 return dev_priv->display.compute_global_watermarks(state);
12290
12291         return 0;
12292 }
12293
12294 /**
12295  * intel_atomic_check - validate state object
12296  * @dev: drm device
12297  * @state: state to validate
12298  */
12299 static int intel_atomic_check(struct drm_device *dev,
12300                               struct drm_atomic_state *state)
12301 {
12302         struct drm_i915_private *dev_priv = to_i915(dev);
12303         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12304         struct drm_crtc *crtc;
12305         struct drm_crtc_state *old_crtc_state, *crtc_state;
12306         int ret, i;
12307         bool any_ms = false;
12308
12309         /* Catch I915_MODE_FLAG_INHERITED */
12310         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
12311                                       crtc_state, i) {
12312                 if (crtc_state->mode.private_flags !=
12313                     old_crtc_state->mode.private_flags)
12314                         crtc_state->mode_changed = true;
12315         }
12316
12317         ret = drm_atomic_helper_check_modeset(dev, state);
12318         if (ret)
12319                 return ret;
12320
12321         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
12322                 struct intel_crtc_state *pipe_config =
12323                         to_intel_crtc_state(crtc_state);
12324
12325                 if (!needs_modeset(crtc_state))
12326                         continue;
12327
12328                 if (!crtc_state->enable) {
12329                         any_ms = true;
12330                         continue;
12331                 }
12332
12333                 ret = intel_modeset_pipe_config(crtc, pipe_config);
12334                 if (ret) {
12335                         intel_dump_pipe_config(to_intel_crtc(crtc),
12336                                                pipe_config, "[failed]");
12337                         return ret;
12338                 }
12339
12340                 if (i915_modparams.fastboot &&
12341                     intel_pipe_config_compare(dev_priv,
12342                                         to_intel_crtc_state(old_crtc_state),
12343                                         pipe_config, true)) {
12344                         crtc_state->mode_changed = false;
12345                         pipe_config->update_pipe = true;
12346                 }
12347
12348                 if (needs_modeset(crtc_state))
12349                         any_ms = true;
12350
12351                 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
12352                                        needs_modeset(crtc_state) ?
12353                                        "[modeset]" : "[fastset]");
12354         }
12355
12356         if (any_ms) {
12357                 ret = intel_modeset_checks(state);
12358
12359                 if (ret)
12360                         return ret;
12361         } else {
12362                 intel_state->cdclk.logical = dev_priv->cdclk.logical;
12363         }
12364
12365         ret = drm_atomic_helper_check_planes(dev, state);
12366         if (ret)
12367                 return ret;
12368
12369         intel_fbc_choose_crtc(dev_priv, intel_state);
12370         return calc_watermark_data(state);
12371 }
12372
/* Prepare (pin etc.) all planes in @state ahead of the hardware commit. */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state)
{
	return drm_atomic_helper_prepare_planes(dev, state);
}
12378
12379 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12380 {
12381         struct drm_device *dev = crtc->base.dev;
12382
12383         if (!dev->max_vblank_count)
12384                 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
12385
12386         return dev->driver->get_vblank_counter(dev, crtc->pipe);
12387 }
12388
/*
 * Commit the updates for one CRTC: enable the pipe on a full modeset,
 * otherwise run the pre-plane-update steps, then commit the planes.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
	bool modeset = needs_modeset(new_crtc_state);
	/* New state of the primary plane, NULL if it's not part of this commit. */
	struct intel_plane_state *new_plane_state =
		intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
						 to_intel_plane(crtc->primary));

	if (modeset) {
		update_scanline_offset(intel_crtc);
		dev_priv->display.crtc_enable(pipe_config, state);

		/* vblanks work again, re-enable pipe CRC. */
		intel_crtc_enable_pipe_crc(intel_crtc);
	} else {
		/* Fastset path: no full enable, just the pre-plane updates. */
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       pipe_config);
	}

	if (new_plane_state)
		intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);

	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
}
12419
12420 static void intel_update_crtcs(struct drm_atomic_state *state)
12421 {
12422         struct drm_crtc *crtc;
12423         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12424         int i;
12425
12426         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
12427                 if (!new_crtc_state->active)
12428                         continue;
12429
12430                 intel_update_crtc(crtc, state, old_crtc_state,
12431                                   new_crtc_state);
12432         }
12433 }
12434
/*
 * SKL+ variant of intel_update_crtcs(): commits CRTCs in an order that
 * keeps the DDB (display buffer) allocations of different pipes from
 * overlapping while updates are in flight, and enables/disables the
 * second DBuf slice on gen11+ as required.
 */
static void skl_update_crtcs(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc_state *cstate;
	unsigned int updated = 0;
	bool progress;
	enum pipe pipe;
	int i;
	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
	u8 required_slices = intel_state->wm_results.ddb.enabled_slices;

	/* DDB allocation currently in effect for each pipe in this state. */
	const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
		/* ignore allocations for crtc's that have been turned off. */
		if (new_crtc_state->active)
			entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;

	/* If 2nd DBuf slice required, enable it here */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
	 * cause pipe underruns and other bad stuff.
	 */
	do {
		progress = false;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);

			intel_crtc = to_intel_crtc(crtc);
			cstate = to_intel_crtc_state(new_crtc_state);
			pipe = intel_crtc->pipe;

			/* Skip pipes already committed this pass and inactive pipes. */
			if (updated & cmask || !cstate->base.active)
				continue;

			/*
			 * Defer this pipe while its new allocation still
			 * overlaps one that is currently on the hardware;
			 * a later pass will pick it up once that clears.
			 */
			if (skl_ddb_allocation_overlaps(dev_priv,
							entries,
							&cstate->wm.skl.ddb,
							i))
				continue;

			updated |= cmask;
			entries[i] = &cstate->wm.skl.ddb;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
						 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
			    !new_crtc_state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  new_crtc_state);

			if (vbl_wait)
				intel_wait_for_vblank(dev_priv, pipe);

			progress = true;
		}
	} while (progress);

	/* If 2nd DBuf slice is no more required disable it */
	if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
		icl_dbuf_slices_update(dev_priv, required_slices);
}
12516
12517 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
12518 {
12519         struct intel_atomic_state *state, *next;
12520         struct llist_node *freed;
12521
12522         freed = llist_del_all(&dev_priv->atomic_helper.free_list);
12523         llist_for_each_entry_safe(state, next, freed, freed)
12524                 drm_atomic_state_put(&state->base);
12525 }
12526
12527 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
12528 {
12529         struct drm_i915_private *dev_priv =
12530                 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
12531
12532         intel_atomic_helper_free_state(dev_priv);
12533 }
12534
/*
 * Block until the commit is ready to proceed: either the commit_ready
 * sw fence has signalled, or a GPU reset requiring a modeset has been
 * flagged (in which case we must not keep waiting on the fence).
 */
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
	struct wait_queue_entry wait_fence, wait_reset;
	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);

	init_wait_entry(&wait_fence, 0);
	init_wait_entry(&wait_reset, 0);
	for (;;) {
		/* Register on both waitqueues before re-checking the conditions. */
		prepare_to_wait(&intel_state->commit_ready.wait,
				&wait_fence, TASK_UNINTERRUPTIBLE);
		prepare_to_wait(&dev_priv->gpu_error.wait_queue,
				&wait_reset, TASK_UNINTERRUPTIBLE);


		if (i915_sw_fence_done(&intel_state->commit_ready)
		    || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
			break;

		schedule();
	}
	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
	finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
}
12558
12559 static void intel_atomic_cleanup_work(struct work_struct *work)
12560 {
12561         struct drm_atomic_state *state =
12562                 container_of(work, struct drm_atomic_state, commit_work);
12563         struct drm_i915_private *i915 = to_i915(state->dev);
12564
12565         drm_atomic_helper_cleanup_planes(&i915->drm, state);
12566         drm_atomic_helper_commit_cleanup_done(state);
12567         drm_atomic_state_put(state);
12568
12569         intel_atomic_helper_free_state(i915);
12570 }
12571
/*
 * Hardware phase of an atomic commit: wait for dependencies, disable the
 * pipes going away, apply global changes (cdclk, SAGV), enable/update the
 * remaining pipes, then finish the post-vblank bookkeeping. Runs either
 * inline from intel_atomic_commit() or from the commit worker.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	u64 put_domains[I915_MAX_PIPES] = {};
	int i;

	/* Wait for the commit_ready fence (or a pending GPU reset) first. */
	intel_atomic_commit_fence_wait(intel_state);

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset)
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

	/* Disable phase: tear down every pipe that needs a modeset. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(new_crtc_state) ||
		    to_intel_crtc_state(new_crtc_state)->update_pipe) {

			/* Grab the power domains now; released further below. */
			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(new_crtc_state));
		}

		if (!needs_modeset(new_crtc_state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
				       to_intel_crtc_state(new_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);

			/*
			 * We need to disable pipe CRC before disabling the pipe,
			 * or we race against vblank off.
			 */
			intel_crtc_disable_pipe_crc(intel_crtc);

			dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!new_crtc_state->active) {
				/*
				 * Make sure we don't call initial_watermarks
				 * for ILK-style watermark updates.
				 *
				 * No clue what this is supposed to achieve.
				 */
				if (INTEL_GEN(dev_priv) >= 9)
					dev_priv->display.initial_watermarks(intel_state,
									     to_intel_crtc_state(new_crtc_state));
			}
		}
	}

	/* FIXME: Eventually get rid of our intel_crtc->config pointer */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
		to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		/* Program the new cdclk while all pipes are locked/off. */
		intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more then one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev, state);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		bool modeset = needs_modeset(new_crtc_state);

		/* Complete events for now disable pipes here. */
		if (modeset && !new_crtc_state->active && new_crtc_state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
			spin_unlock_irq(&dev->event_lock);

			new_crtc_state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	drm_atomic_helper_wait_for_flip_done(dev, state);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(new_crtc_state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_state,
							      intel_cstate);
	}

	/* Post-plane updates, power-domain release and per-crtc verification. */
	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
	}

	if (intel_state->modeset)
		intel_verify_planes(intel_state);

	/* Re-enable SAGV only after the new configuration is committed. */
	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset) {
		/* As one of the primary mmio accessors, KMS has a high
		 * likelihood of triggering bugs in unclaimed access. After we
		 * finish modesetting, see if an error has been flagged, and if
		 * so enable debugging for the next modeset - and hope we catch
		 * the culprit.
		 */
		intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
	}

	/*
	 * Defer the cleanup of the old state to a separate worker to not
	 * impede the current task (userspace for blocking modesets) that
	 * are executed inline. For out-of-line asynchronous modesets/flips,
	 * deferring to a new worker seems overkill, but we would place a
	 * schedule point (cond_resched()) here anyway to keep latencies
	 * down.
	 */
	INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
	schedule_work(&state->commit_work);
}
12743
12744 static void intel_atomic_commit_work(struct work_struct *work)
12745 {
12746         struct drm_atomic_state *state =
12747                 container_of(work, struct drm_atomic_state, commit_work);
12748
12749         intel_atomic_commit_tail(state);
12750 }
12751
12752 static int __i915_sw_fence_call
12753 intel_atomic_commit_ready(struct i915_sw_fence *fence,
12754                           enum i915_sw_fence_notify notify)
12755 {
12756         struct intel_atomic_state *state =
12757                 container_of(fence, struct intel_atomic_state, commit_ready);
12758
12759         switch (notify) {
12760         case FENCE_COMPLETE:
12761                 /* we do blocking waits in the worker, nothing to do here */
12762                 break;
12763         case FENCE_FREE:
12764                 {
12765                         struct intel_atomic_helper *helper =
12766                                 &to_i915(state->base.dev)->atomic_helper;
12767
12768                         if (llist_add(&state->freed, &helper->free_list))
12769                                 schedule_work(&helper->free_work);
12770                         break;
12771                 }
12772         }
12773
12774         return NOTIFY_DONE;
12775 }
12776
12777 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
12778 {
12779         struct drm_plane_state *old_plane_state, *new_plane_state;
12780         struct drm_plane *plane;
12781         int i;
12782
12783         for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
12784                 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
12785                                   intel_fb_obj(new_plane_state->fb),
12786                                   to_intel_plane(plane)->frontbuffer_bit);
12787 }
12788
12789 /**
12790  * intel_atomic_commit - commit validated state object
12791  * @dev: DRM device
12792  * @state: the top-level driver state object
12793  * @nonblock: nonblocking commit
12794  *
12795  * This function commits a top-level state object that has been validated
12796  * with drm_atomic_helper_check().
12797  *
12798  * RETURNS
12799  * Zero for success or -errno.
12800  */
12801 static int intel_atomic_commit(struct drm_device *dev,
12802                                struct drm_atomic_state *state,
12803                                bool nonblock)
12804 {
12805         struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12806         struct drm_i915_private *dev_priv = to_i915(dev);
12807         int ret = 0;
12808
12809         drm_atomic_state_get(state);
12810         i915_sw_fence_init(&intel_state->commit_ready,
12811                            intel_atomic_commit_ready);
12812
12813         /*
12814          * The intel_legacy_cursor_update() fast path takes care
12815          * of avoiding the vblank waits for simple cursor
12816          * movement and flips. For cursor on/off and size changes,
12817          * we want to perform the vblank waits so that watermark
12818          * updates happen during the correct frames. Gen9+ have
12819          * double buffered watermarks and so shouldn't need this.
12820          *
12821          * Unset state->legacy_cursor_update before the call to
12822          * drm_atomic_helper_setup_commit() because otherwise
12823          * drm_atomic_helper_wait_for_flip_done() is a noop and
12824          * we get FIFO underruns because we didn't wait
12825          * for vblank.
12826          *
12827          * FIXME doing watermarks and fb cleanup from a vblank worker
12828          * (assuming we had any) would solve these problems.
12829          */
12830         if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
12831                 struct intel_crtc_state *new_crtc_state;
12832                 struct intel_crtc *crtc;
12833                 int i;
12834
12835                 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
12836                         if (new_crtc_state->wm.need_postvbl_update ||
12837                             new_crtc_state->update_wm_post)
12838                                 state->legacy_cursor_update = false;
12839         }
12840
12841         ret = intel_atomic_prepare_commit(dev, state);
12842         if (ret) {
12843                 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
12844                 i915_sw_fence_commit(&intel_state->commit_ready);
12845                 return ret;
12846         }
12847
12848         ret = drm_atomic_helper_setup_commit(state, nonblock);
12849         if (!ret)
12850                 ret = drm_atomic_helper_swap_state(state, true);
12851
12852         if (ret) {
12853                 i915_sw_fence_commit(&intel_state->commit_ready);
12854
12855                 drm_atomic_helper_cleanup_planes(dev, state);
12856                 return ret;
12857         }
12858         dev_priv->wm.distrust_bios_wm = false;
12859         intel_shared_dpll_swap_state(state);
12860         intel_atomic_track_fbs(state);
12861
12862         if (intel_state->modeset) {
12863                 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
12864                        sizeof(intel_state->min_cdclk));
12865                 memcpy(dev_priv->min_voltage_level,
12866                        intel_state->min_voltage_level,
12867                        sizeof(intel_state->min_voltage_level));
12868                 dev_priv->active_crtcs = intel_state->active_crtcs;
12869                 dev_priv->cdclk.logical = intel_state->cdclk.logical;
12870                 dev_priv->cdclk.actual = intel_state->cdclk.actual;
12871         }
12872
12873         drm_atomic_state_get(state);
12874         INIT_WORK(&state->commit_work, intel_atomic_commit_work);
12875
12876         i915_sw_fence_commit(&intel_state->commit_ready);
12877         if (nonblock && intel_state->modeset) {
12878                 queue_work(dev_priv->modeset_wq, &state->commit_work);
12879         } else if (nonblock) {
12880                 queue_work(system_unbound_wq, &state->commit_work);
12881         } else {
12882                 if (intel_state->modeset)
12883                         flush_workqueue(dev_priv->modeset_wq);
12884                 intel_atomic_commit_tail(state);
12885         }
12886
12887         return 0;
12888 }
12889
/* CRTC vfuncs: atomic helpers plus i915-specific state and CRC hooks. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
	.set_crc_source = intel_crtc_set_crc_source,
	.verify_crc_source = intel_crtc_verify_crc_source,
	.get_crc_sources = intel_crtc_get_crc_sources,
};
12901
/*
 * One-shot vblank waitqueue entry used to boost the GPU frequency (RPS)
 * if the request backing a flip has not started by the vblank; freed by
 * do_rps_boost() when the callback fires.
 */
struct wait_rps_boost {
	/* Embedded waitqueue entry; .func is set to do_rps_boost(). */
	struct wait_queue_entry wait;

	/* CRTC whose vblank reference we hold until the callback runs. */
	struct drm_crtc *crtc;
	/* Request we are waiting on; we hold a reference until the callback. */
	struct i915_request *request;
};
12908
12909 static int do_rps_boost(struct wait_queue_entry *_wait,
12910                         unsigned mode, int sync, void *key)
12911 {
12912         struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
12913         struct i915_request *rq = wait->request;
12914
12915         /*
12916          * If we missed the vblank, but the request is already running it
12917          * is reasonable to assume that it will complete before the next
12918          * vblank without our intervention, so leave RPS alone.
12919          */
12920         if (!i915_request_started(rq))
12921                 gen6_rps_boost(rq, NULL);
12922         i915_request_put(rq);
12923
12924         drm_crtc_vblank_put(wait->crtc);
12925
12926         list_del(&wait->wait.entry);
12927         kfree(wait);
12928         return 1;
12929 }
12930
12931 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
12932                                        struct dma_fence *fence)
12933 {
12934         struct wait_rps_boost *wait;
12935
12936         if (!dma_fence_is_i915(fence))
12937                 return;
12938
12939         if (INTEL_GEN(to_i915(crtc->dev)) < 6)
12940                 return;
12941
12942         if (drm_crtc_vblank_get(crtc))
12943                 return;
12944
12945         wait = kmalloc(sizeof(*wait), GFP_KERNEL);
12946         if (!wait) {
12947                 drm_crtc_vblank_put(crtc);
12948                 return;
12949         }
12950
12951         wait->request = to_request(dma_fence_get(fence));
12952         wait->crtc = crtc;
12953
12954         wait->wait.func = do_rps_boost;
12955         wait->wait.flags = 0;
12956
12957         add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
12958 }
12959
12960 static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
12961 {
12962         struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
12963         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
12964         struct drm_framebuffer *fb = plane_state->base.fb;
12965         struct i915_vma *vma;
12966
12967         if (plane->id == PLANE_CURSOR &&
12968             INTEL_INFO(dev_priv)->cursor_needs_physical) {
12969                 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12970                 const int align = intel_cursor_alignment(dev_priv);
12971
12972                 return i915_gem_object_attach_phys(obj, align);
12973         }
12974
12975         vma = intel_pin_and_fence_fb_obj(fb,
12976                                          plane_state->base.rotation,
12977                                          intel_plane_uses_fence(plane_state),
12978                                          &plane_state->flags);
12979         if (IS_ERR(vma))
12980                 return PTR_ERR(vma);
12981
12982         plane_state->vma = vma;
12983
12984         return 0;
12985 }
12986
12987 static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
12988 {
12989         struct i915_vma *vma;
12990
12991         vma = fetch_and_zero(&old_plane_state->vma);
12992         if (vma)
12993                 intel_unpin_fb_vma(vma, old_plane_state->flags);
12994 }
12995
/*
 * Raise the scheduling priority of any rendering still pending on a
 * scanout object so the flip is not delayed behind lower-priority work.
 */
static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
{
	struct i915_sched_attr attr = {
		.priority = I915_PRIORITY_DISPLAY,
	};

	i915_gem_object_wait_priority(obj, 0, &attr);
}
13004
13005 /**
13006  * intel_prepare_plane_fb - Prepare fb for usage on plane
13007  * @plane: drm plane to prepare for
13008  * @new_state: the plane state being prepared
13009  *
13010  * Prepares a framebuffer for usage on a display plane.  Generally this
13011  * involves pinning the underlying object and updating the frontbuffer tracking
13012  * bits.  Some older platforms need special physical address handling for
13013  * cursor planes.
13014  *
13015  * Must be called with struct_mutex held.
13016  *
13017  * Returns 0 on success, negative error code on failure.
13018  */
13019 int
13020 intel_prepare_plane_fb(struct drm_plane *plane,
13021                        struct drm_plane_state *new_state)
13022 {
13023         struct intel_atomic_state *intel_state =
13024                 to_intel_atomic_state(new_state->state);
13025         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13026         struct drm_framebuffer *fb = new_state->fb;
13027         struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13028         struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13029         int ret;
13030
13031         if (old_obj) {
13032                 struct drm_crtc_state *crtc_state =
13033                         drm_atomic_get_new_crtc_state(new_state->state,
13034                                                       plane->state->crtc);
13035
13036                 /* Big Hammer, we also need to ensure that any pending
13037                  * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13038                  * current scanout is retired before unpinning the old
13039                  * framebuffer. Note that we rely on userspace rendering
13040                  * into the buffer attached to the pipe they are waiting
13041                  * on. If not, userspace generates a GPU hang with IPEHR
13042                  * point to the MI_WAIT_FOR_EVENT.
13043                  *
13044                  * This should only fail upon a hung GPU, in which case we
13045                  * can safely continue.
13046                  */
13047                 if (needs_modeset(crtc_state)) {
13048                         ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13049                                                               old_obj->resv, NULL,
13050                                                               false, 0,
13051                                                               GFP_KERNEL);
13052                         if (ret < 0)
13053                                 return ret;
13054                 }
13055         }
13056
13057         if (new_state->fence) { /* explicit fencing */
13058                 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
13059                                                     new_state->fence,
13060                                                     I915_FENCE_TIMEOUT,
13061                                                     GFP_KERNEL);
13062                 if (ret < 0)
13063                         return ret;
13064         }
13065
13066         if (!obj)
13067                 return 0;
13068
13069         ret = i915_gem_object_pin_pages(obj);
13070         if (ret)
13071                 return ret;
13072
13073         ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
13074         if (ret) {
13075                 i915_gem_object_unpin_pages(obj);
13076                 return ret;
13077         }
13078
13079         ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
13080
13081         fb_obj_bump_render_priority(obj);
13082
13083         mutex_unlock(&dev_priv->drm.struct_mutex);
13084         i915_gem_object_unpin_pages(obj);
13085         if (ret)
13086                 return ret;
13087
13088         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
13089
13090         if (!new_state->fence) { /* implicit fencing */
13091                 struct dma_fence *fence;
13092
13093                 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
13094                                                       obj->resv, NULL,
13095                                                       false, I915_FENCE_TIMEOUT,
13096                                                       GFP_KERNEL);
13097                 if (ret < 0)
13098                         return ret;
13099
13100                 fence = reservation_object_get_excl_rcu(obj->resv);
13101                 if (fence) {
13102                         add_rps_boost_after_vblank(new_state->crtc, fence);
13103                         dma_fence_put(fence);
13104                 }
13105         } else {
13106                 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
13107         }
13108
13109         /*
13110          * We declare pageflips to be interactive and so merit a small bias
13111          * towards upclocking to deliver the frame on time. By only changing
13112          * the RPS thresholds to sample more regularly and aim for higher
13113          * clocks we can hopefully deliver low power workloads (like kodi)
13114          * that are not quite steady state without resorting to forcing
13115          * maximum clocks following a vblank miss (see do_rps_boost()).
13116          */
13117         if (!intel_state->rps_interactive) {
13118                 intel_rps_mark_interactive(dev_priv, true);
13119                 intel_state->rps_interactive = true;
13120         }
13121
13122         return 0;
13123 }
13124
13125 /**
13126  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13127  * @plane: drm plane to clean up for
13128  * @old_state: the state from the previous modeset
13129  *
13130  * Cleans up a framebuffer that has just been removed from a plane.
13131  *
13132  * Must be called with struct_mutex held.
13133  */
13134 void
13135 intel_cleanup_plane_fb(struct drm_plane *plane,
13136                        struct drm_plane_state *old_state)
13137 {
13138         struct intel_atomic_state *intel_state =
13139                 to_intel_atomic_state(old_state->state);
13140         struct drm_i915_private *dev_priv = to_i915(plane->dev);
13141
13142         if (intel_state->rps_interactive) {
13143                 intel_rps_mark_interactive(dev_priv, false);
13144                 intel_state->rps_interactive = false;
13145         }
13146
13147         /* Should only be called after a successful intel_prepare_plane_fb()! */
13148         mutex_lock(&dev_priv->drm.struct_mutex);
13149         intel_plane_unpin_fb(to_intel_plane_state(old_state));
13150         mutex_unlock(&dev_priv->drm.struct_mutex);
13151 }
13152
13153 int
13154 skl_max_scale(struct intel_crtc *intel_crtc,
13155               struct intel_crtc_state *crtc_state,
13156               uint32_t pixel_format)
13157 {
13158         struct drm_i915_private *dev_priv;
13159         int max_scale, mult;
13160         int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
13161
13162         if (!intel_crtc || !crtc_state->base.enable)
13163                 return DRM_PLANE_HELPER_NO_SCALING;
13164
13165         dev_priv = to_i915(intel_crtc->base.dev);
13166
13167         crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13168         max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
13169
13170         if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
13171                 max_dotclk *= 2;
13172
13173         if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
13174                 return DRM_PLANE_HELPER_NO_SCALING;
13175
13176         /*
13177          * skl max scale is lower of:
13178          *    close to 3 but not 3, -1 is for that purpose
13179          *            or
13180          *    cdclk/crtc_clock
13181          */
13182         mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
13183         tmpclk1 = (1 << 16) * mult - 1;
13184         tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
13185         max_scale = min(tmpclk1, tmpclk2);
13186
13187         return max_scale;
13188 }
13189
13190 static int
13191 intel_check_primary_plane(struct intel_plane *plane,
13192                           struct intel_crtc_state *crtc_state,
13193                           struct intel_plane_state *state)
13194 {
13195         struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
13196         struct drm_crtc *crtc = state->base.crtc;
13197         int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13198         int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13199         bool can_position = false;
13200         int ret;
13201         uint32_t pixel_format = 0;
13202
13203         if (INTEL_GEN(dev_priv) >= 9) {
13204                 /* use scaler when colorkey is not required */
13205                 if (!state->ckey.flags) {
13206                         min_scale = 1;
13207                         if (state->base.fb)
13208                                 pixel_format = state->base.fb->format->format;
13209                         max_scale = skl_max_scale(to_intel_crtc(crtc),
13210                                                   crtc_state, pixel_format);
13211                 }
13212                 can_position = true;
13213         }
13214
13215         ret = drm_atomic_helper_check_plane_state(&state->base,
13216                                                   &crtc_state->base,
13217                                                   min_scale, max_scale,
13218                                                   can_position, true);
13219         if (ret)
13220                 return ret;
13221
13222         if (!state->base.fb)
13223                 return 0;
13224
13225         if (INTEL_GEN(dev_priv) >= 9) {
13226                 ret = skl_check_plane_surface(crtc_state, state);
13227                 if (ret)
13228                         return ret;
13229
13230                 state->ctl = skl_plane_ctl(crtc_state, state);
13231         } else {
13232                 ret = i9xx_check_plane_surface(state);
13233                 if (ret)
13234                         return ret;
13235
13236                 state->ctl = i9xx_plane_ctl(crtc_state, state);
13237         }
13238
13239         if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
13240                 state->color_ctl = glk_plane_color_ctl(crtc_state, state);
13241
13242         return 0;
13243 }
13244
/*
 * Per-crtc start-of-commit hook: program color management, open the
 * vblank-evasion window and, for non-modesets, apply fastset pipe
 * updates and watermarks.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_cstate =
		to_intel_crtc_state(old_crtc_state);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_crtc_state->state);
	struct intel_crtc_state *intel_cstate =
		intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
	bool modeset = needs_modeset(&intel_cstate->base);

	/* Program new gamma/CSC state before the vblank-evasion window. */
	if (!modeset &&
	    (intel_cstate->base.color_mgmt_changed ||
	     intel_cstate->update_pipe)) {
		intel_color_set_csc(&intel_cstate->base);
		intel_color_load_luts(&intel_cstate->base);
	}

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_cstate);

	/* A full modeset reprograms everything; skip the fastset updates. */
	if (modeset)
		goto out;

	if (intel_cstate->update_pipe)
		intel_update_pipe_config(old_intel_cstate, intel_cstate);
	else if (INTEL_GEN(dev_priv) >= 9)
		skl_detach_scalers(intel_crtc);

out:
	if (dev_priv->display.atomic_update_watermarks)
		dev_priv->display.atomic_update_watermarks(old_intel_state,
							   intel_cstate);
}
13282
13283 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
13284                                   struct intel_crtc_state *crtc_state)
13285 {
13286         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13287
13288         if (!IS_GEN2(dev_priv))
13289                 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
13290
13291         if (crtc_state->has_pch_encoder) {
13292                 enum pipe pch_transcoder =
13293                         intel_crtc_pch_transcoder(crtc);
13294
13295                 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
13296         }
13297 }
13298
13299 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13300                                      struct drm_crtc_state *old_crtc_state)
13301 {
13302         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13303         struct intel_atomic_state *old_intel_state =
13304                 to_intel_atomic_state(old_crtc_state->state);
13305         struct intel_crtc_state *new_crtc_state =
13306                 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
13307
13308         intel_pipe_update_end(new_crtc_state);
13309
13310         if (new_crtc_state->update_pipe &&
13311             !needs_modeset(&new_crtc_state->base) &&
13312             old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
13313                 intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
13314 }
13315
13316 /**
13317  * intel_plane_destroy - destroy a plane
13318  * @plane: plane to destroy
13319  *
13320  * Common destruction function for all types of planes (primary, cursor,
13321  * sprite).
13322  */
13323 void intel_plane_destroy(struct drm_plane *plane)
13324 {
13325         drm_plane_cleanup(plane);
13326         kfree(to_intel_plane(plane));
13327 }
13328
13329 static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
13330                                             u32 format, u64 modifier)
13331 {
13332         switch (modifier) {
13333         case DRM_FORMAT_MOD_LINEAR:
13334         case I915_FORMAT_MOD_X_TILED:
13335                 break;
13336         default:
13337                 return false;
13338         }
13339
13340         switch (format) {
13341         case DRM_FORMAT_C8:
13342         case DRM_FORMAT_RGB565:
13343         case DRM_FORMAT_XRGB1555:
13344         case DRM_FORMAT_XRGB8888:
13345                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13346                         modifier == I915_FORMAT_MOD_X_TILED;
13347         default:
13348                 return false;
13349         }
13350 }
13351
13352 static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
13353                                             u32 format, u64 modifier)
13354 {
13355         switch (modifier) {
13356         case DRM_FORMAT_MOD_LINEAR:
13357         case I915_FORMAT_MOD_X_TILED:
13358                 break;
13359         default:
13360                 return false;
13361         }
13362
13363         switch (format) {
13364         case DRM_FORMAT_C8:
13365         case DRM_FORMAT_RGB565:
13366         case DRM_FORMAT_XRGB8888:
13367         case DRM_FORMAT_XBGR8888:
13368         case DRM_FORMAT_XRGB2101010:
13369         case DRM_FORMAT_XBGR2101010:
13370                 return modifier == DRM_FORMAT_MOD_LINEAR ||
13371                         modifier == I915_FORMAT_MOD_X_TILED;
13372         default:
13373                 return false;
13374         }
13375 }
13376
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
					   u32 format, u64 modifier)
{
	struct intel_plane *plane = to_intel_plane(_plane);

	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		break;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		/* CCS modifiers only on planes with render compression. */
		if (!plane->has_ccs)
			return false;
		break;
	default:
		return false;
	}

	/*
	 * Ordered from the most to the least capable formats: each tier
	 * accepts its own strongest modifiers and then falls through to
	 * also accept everything the tiers below it accept.
	 */
	switch (format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
		/* Only the 8888 formats support CCS. */
		if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
		    modifier == I915_FORMAT_MOD_Y_TILED_CCS)
			return true;
		/* fall through */
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
		/* These additionally support Yf tiling (but not CCS). */
		if (modifier == I915_FORMAT_MOD_Yf_TILED)
			return true;
		/* fall through */
	case DRM_FORMAT_C8:
		/* The common baseline: linear, X and Y tiling. */
		if (modifier == DRM_FORMAT_MOD_LINEAR ||
		    modifier == I915_FORMAT_MOD_X_TILED ||
		    modifier == I915_FORMAT_MOD_Y_TILED)
			return true;
		/* fall through */
	default:
		return false;
	}
}
13427
13428 static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
13429                                               u32 format, u64 modifier)
13430 {
13431         return modifier == DRM_FORMAT_MOD_LINEAR &&
13432                 format == DRM_FORMAT_ARGB8888;
13433 }
13434
13435 static struct drm_plane_funcs skl_plane_funcs = {
13436         .update_plane = drm_atomic_helper_update_plane,
13437         .disable_plane = drm_atomic_helper_disable_plane,
13438         .destroy = intel_plane_destroy,
13439         .atomic_get_property = intel_plane_atomic_get_property,
13440         .atomic_set_property = intel_plane_atomic_set_property,
13441         .atomic_duplicate_state = intel_plane_duplicate_state,
13442         .atomic_destroy_state = intel_plane_destroy_state,
13443         .format_mod_supported = skl_plane_format_mod_supported,
13444 };
13445
13446 static struct drm_plane_funcs i965_plane_funcs = {
13447         .update_plane = drm_atomic_helper_update_plane,
13448         .disable_plane = drm_atomic_helper_disable_plane,
13449         .destroy = intel_plane_destroy,
13450         .atomic_get_property = intel_plane_atomic_get_property,
13451         .atomic_set_property = intel_plane_atomic_set_property,
13452         .atomic_duplicate_state = intel_plane_duplicate_state,
13453         .atomic_destroy_state = intel_plane_destroy_state,
13454         .format_mod_supported = i965_plane_format_mod_supported,
13455 };
13456
13457 static struct drm_plane_funcs i8xx_plane_funcs = {
13458         .update_plane = drm_atomic_helper_update_plane,
13459         .disable_plane = drm_atomic_helper_disable_plane,
13460         .destroy = intel_plane_destroy,
13461         .atomic_get_property = intel_plane_atomic_get_property,
13462         .atomic_set_property = intel_plane_atomic_set_property,
13463         .atomic_duplicate_state = intel_plane_duplicate_state,
13464         .atomic_destroy_state = intel_plane_destroy_state,
13465         .format_mod_supported = i8xx_plane_format_mod_supported,
13466 };
13467
/*
 * Fastpath for legacy cursor ioctls: bypass the full atomic commit
 * machinery when only the cursor's fb or position changes, updating the
 * plane directly under struct_mutex. Anything that could affect
 * watermarks, or that needs a modeset, falls back to the atomic helper
 * slowpath.
 */
static int
intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   struct drm_framebuffer *fb,
			   int crtc_x, int crtc_y,
			   unsigned int crtc_w, unsigned int crtc_h,
			   uint32_t src_x, uint32_t src_y,
			   uint32_t src_w, uint32_t src_h,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct drm_crtc_state *crtc_state = crtc->state;

	/*
	 * When crtc is inactive or there is a modeset pending,
	 * wait for it to complete in the slowpath
	 */
	if (!crtc_state->active || needs_modeset(crtc_state) ||
	    to_intel_crtc_state(crtc_state)->update_pipe)
		goto slow;

	old_plane_state = plane->state;
	/*
	 * Don't do an async update if there is an outstanding commit modifying
	 * the plane.  This prevents our async update's changes from getting
	 * overridden by a previous synchronous update's state.
	 */
	if (old_plane_state->commit &&
	    !try_wait_for_completion(&old_plane_state->commit->hw_done))
		goto slow;

	/*
	 * If any parameters change that may affect watermarks,
	 * take the slowpath. Only changing fb or position should be
	 * in the fastpath.
	 */
	if (old_plane_state->crtc != crtc ||
	    old_plane_state->src_w != src_w ||
	    old_plane_state->src_h != src_h ||
	    old_plane_state->crtc_w != crtc_w ||
	    old_plane_state->crtc_h != crtc_h ||
	    !old_plane_state->fb != !fb)
		goto slow;

	new_plane_state = intel_plane_duplicate_state(plane);
	if (!new_plane_state)
		return -ENOMEM;

	drm_atomic_set_fb_for_plane(new_plane_state, fb);

	new_plane_state->src_x = src_x;
	new_plane_state->src_y = src_y;
	new_plane_state->src_w = src_w;
	new_plane_state->src_h = src_h;
	new_plane_state->crtc_x = crtc_x;
	new_plane_state->crtc_y = crtc_y;
	new_plane_state->crtc_w = crtc_w;
	new_plane_state->crtc_h = crtc_h;

	ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
						  to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
						  to_intel_plane_state(plane->state),
						  to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_free;

	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
	if (ret)
		goto out_free;

	ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
	if (ret)
		goto out_unlock;

	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

	/* Move the frontbuffer tracking bit from the old fb to the new. */
	old_fb = old_plane_state->fb;
	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

	if (plane->state->visible) {
		trace_intel_update_plane(plane, to_intel_crtc(crtc));
		intel_plane->update_plane(intel_plane,
					  to_intel_crtc_state(crtc->state),
					  to_intel_plane_state(plane->state));
	} else {
		trace_intel_disable_plane(plane, to_intel_crtc(crtc));
		intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
	}

	intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));

out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
	/* On failure free the unused new state; on success the old one. */
	if (ret)
		intel_plane_destroy_state(plane, new_plane_state);
	else
		intel_plane_destroy_state(plane, old_plane_state);
	return ret;

slow:
	return drm_atomic_helper_update_plane(plane, crtc, fb,
					      crtc_x, crtc_y, crtc_w, crtc_h,
					      src_x, src_y, src_w, src_h, ctx);
}
13581
/*
 * Cursor plane vfuncs: same as the other planes except .update_plane,
 * which goes through the legacy cursor fastpath.
 */
static const struct drm_plane_funcs intel_cursor_plane_funcs = {
	.update_plane = intel_legacy_cursor_update,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
	.format_mod_supported = intel_cursor_format_mod_supported,
};
13592
13593 static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
13594                                enum i9xx_plane_id i9xx_plane)
13595 {
13596         if (!HAS_FBC(dev_priv))
13597                 return false;
13598
13599         if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
13600                 return i9xx_plane == PLANE_A; /* tied to pipe A */
13601         else if (IS_IVYBRIDGE(dev_priv))
13602                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
13603                         i9xx_plane == PLANE_C;
13604         else if (INTEL_GEN(dev_priv) >= 4)
13605                 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
13606         else
13607                 return i9xx_plane == PLANE_A;
13608 }
13609
13610 static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
13611                               enum pipe pipe, enum plane_id plane_id)
13612 {
13613         if (!HAS_FBC(dev_priv))
13614                 return false;
13615
13616         return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
13617 }
13618
13619 bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
13620                           enum pipe pipe, enum plane_id plane_id)
13621 {
13622         if (plane_id == PLANE_PRIMARY) {
13623                 if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
13624                         return false;
13625                 else if ((INTEL_GEN(dev_priv) == 9 && pipe == PIPE_C) &&
13626                          !IS_GEMINILAKE(dev_priv))
13627                         return false;
13628         } else if (plane_id >= PLANE_SPRITE0) {
13629                 if (plane_id == PLANE_CURSOR)
13630                         return false;
13631                 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) == 10) {
13632                         if (plane_id != PLANE_SPRITE0)
13633                                 return false;
13634                 } else {
13635                         if (plane_id != PLANE_SPRITE0 || pipe == PIPE_C ||
13636                             IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
13637                                 return false;
13638                 }
13639         }
13640         return true;
13641 }
13642
/*
 * Create and register the primary plane for @pipe.
 *
 * Selects the plane vfuncs, supported pixel formats, format modifiers
 * and rotation/color properties appropriate for the platform
 * generation, then registers the plane with the DRM core.
 *
 * Returns the new plane on success, or an ERR_PTR() on failure (all
 * partially constructed state is freed).
 */
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const struct drm_plane_funcs *plane_funcs;
	const uint32_t *intel_primary_formats;
	unsigned int supported_rotations;
	unsigned int num_formats;
	const uint64_t *modifiers;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	primary->base.state = &state->base;

	/* Only gen9+ primaries can scale; scaler assignment is deferred. */
	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	/*
	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
	 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
		primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
	else
		primary->i9xx_plane = (enum i9xx_plane_id) pipe;
	primary->id = PLANE_PRIMARY;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);

	if (INTEL_GEN(dev_priv) >= 9)
		primary->has_fbc = skl_plane_has_fbc(dev_priv,
						     primary->pipe,
						     primary->id);
	else
		primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
						      primary->i9xx_plane);

	/* Advertise this plane as a potential FBC source. */
	if (primary->has_fbc) {
		struct intel_fbc *fbc = &dev_priv->fbc;

		fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
	}

	primary->check_plane = intel_check_primary_plane;

	/* Pick formats/modifiers/vfuncs per platform generation. */
	if (INTEL_GEN(dev_priv) >= 9) {
		primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
						     PLANE_PRIMARY);

		if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
			intel_primary_formats = skl_pri_planar_formats;
			num_formats = ARRAY_SIZE(skl_pri_planar_formats);
		} else {
			intel_primary_formats = skl_primary_formats;
			num_formats = ARRAY_SIZE(skl_primary_formats);
		}

		if (primary->has_ccs)
			modifiers = skl_format_modifiers_ccs;
		else
			modifiers = skl_format_modifiers_noccs;

		primary->update_plane = skl_update_plane;
		primary->disable_plane = skl_disable_plane;
		primary->get_hw_state = skl_plane_get_hw_state;

		plane_funcs = &skl_plane_funcs;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;

		plane_funcs = &i965_plane_funcs;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
		modifiers = i9xx_format_modifiers;

		primary->update_plane = i9xx_update_plane;
		primary->disable_plane = i9xx_disable_plane;
		primary->get_hw_state = i9xx_plane_get_hw_state;

		plane_funcs = &i8xx_plane_funcs;
	}

	/* Only the debug name differs between the three init calls. */
	if (INTEL_GEN(dev_priv) >= 9)
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
					       0, plane_funcs,
					       intel_primary_formats, num_formats,
					       modifiers,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c",
					       plane_name(primary->i9xx_plane));
	if (ret)
		goto fail;

	/* Rotation/reflection capabilities grow with the hw generation. */
	if (INTEL_GEN(dev_priv) >= 10) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 9) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
	} else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X;
	} else if (INTEL_GEN(dev_priv) >= 4) {
		supported_rotations =
			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
	} else {
		supported_rotations = DRM_MODE_ROTATE_0;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&primary->base,
						   DRM_MODE_ROTATE_0,
						   supported_rotations);

	if (INTEL_GEN(dev_priv) >= 9)
		drm_plane_create_color_properties(&primary->base,
						  BIT(DRM_COLOR_YCBCR_BT601) |
						  BIT(DRM_COLOR_YCBCR_BT709),
						  BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
						  BIT(DRM_COLOR_YCBCR_FULL_RANGE),
						  DRM_COLOR_YCBCR_BT709,
						  DRM_COLOR_YCBCR_LIMITED_RANGE);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return primary;

fail:
	kfree(state);
	kfree(primary);

	return ERR_PTR(ret);
}
13816
/*
 * Create and register the cursor plane for @pipe.
 *
 * i845/i865 use a dedicated cursor implementation; everything else
 * uses the i9xx cursor paths. Returns the new plane on success, or an
 * ERR_PTR() on failure (all partially constructed state is freed).
 */
static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
			  enum pipe pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor) {
		ret = -ENOMEM;
		goto fail;
	}

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		ret = -ENOMEM;
		goto fail;
	}

	cursor->base.state = &state->base;

	/* Cursors can never scale. */
	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
	cursor->id = PLANE_CURSOR;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		cursor->update_plane = i845_update_cursor;
		cursor->disable_plane = i845_disable_cursor;
		cursor->get_hw_state = i845_cursor_get_hw_state;
		cursor->check_plane = i845_check_cursor;
	} else {
		cursor->update_plane = i9xx_update_cursor;
		cursor->disable_plane = i9xx_disable_cursor;
		cursor->get_hw_state = i9xx_cursor_get_hw_state;
		cursor->check_plane = i9xx_check_cursor;
	}

	/*
	 * ~0 marks the cached register values as unknown so the first
	 * update rewrites them unconditionally.
	 */
	cursor->cursor.base = ~0;
	cursor->cursor.cntl = ~0;

	if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
		cursor->cursor.size = ~0;

	ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
				       0, &intel_cursor_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       cursor_format_modifiers,
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	if (INTEL_GEN(dev_priv) >= 4)
		drm_plane_create_rotation_property(&cursor->base,
						   DRM_MODE_ROTATE_0,
						   DRM_MODE_ROTATE_0 |
						   DRM_MODE_ROTATE_180);

	if (INTEL_GEN(dev_priv) >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return cursor;

fail:
	kfree(state);
	kfree(cursor);

	return ERR_PTR(ret);
}
13893
13894 static void intel_crtc_init_scalers(struct intel_crtc *crtc,
13895                                     struct intel_crtc_state *crtc_state)
13896 {
13897         struct intel_crtc_scaler_state *scaler_state =
13898                 &crtc_state->scaler_state;
13899         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
13900         int i;
13901
13902         crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
13903         if (!crtc->num_scalers)
13904                 return;
13905
13906         for (i = 0; i < crtc->num_scalers; i++) {
13907                 struct intel_scaler *scaler = &scaler_state->scalers[i];
13908
13909                 scaler->in_use = 0;
13910                 scaler->mode = PS_SCALER_MODE_DYN;
13911         }
13912
13913         scaler_state->scaler_id = -1;
13914 }
13915
/*
 * Allocate and register the CRTC for @pipe together with its primary,
 * sprite and cursor planes, and wire up the pipe/plane -> crtc lookup
 * tables.
 *
 * Returns 0 on success or a negative errno. On failure, any planes
 * already registered are cleaned up later by drm_mode_config_cleanup().
 */
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct intel_plane *primary = NULL;
	struct intel_plane *cursor = NULL;
	int sprite, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (!intel_crtc)
		return -ENOMEM;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state) {
		ret = -ENOMEM;
		goto fail;
	}
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev_priv, pipe);
	if (IS_ERR(primary)) {
		ret = PTR_ERR(primary);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(primary->id);

	for_each_sprite(dev_priv, pipe, sprite) {
		struct intel_plane *plane;

		plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			goto fail;
		}
		intel_crtc->plane_ids_mask |= BIT(plane->id);
	}

	cursor = intel_cursor_plane_create(dev_priv, pipe);
	if (IS_ERR(cursor)) {
		ret = PTR_ERR(cursor);
		goto fail;
	}
	intel_crtc->plane_ids_mask |= BIT(cursor->id);

	ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
					&primary->base, &cursor->base,
					&intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	intel_crtc->pipe = pipe;

	/* initialize shared scalers */
	intel_crtc_init_scalers(intel_crtc, crtc_state);

	/* Each pipe must map to exactly one CRTC. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
	       dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
	dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;

	/* Pre-gen9 also tracks which legacy plane feeds this CRTC. */
	if (INTEL_GEN(dev_priv) < 9) {
		enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;

		BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
		       dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
		dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);

	return 0;

fail:
	/*
	 * drm_mode_config_cleanup() will free up any
	 * crtcs/planes already initialized.
	 */
	kfree(crtc_state);
	kfree(intel_crtc);

	return ret;
}
14004
14005 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14006 {
14007         struct drm_device *dev = connector->base.dev;
14008
14009         WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14010
14011         if (!connector->base.state->crtc)
14012                 return INVALID_PIPE;
14013
14014         return to_intel_crtc(connector->base.state->crtc)->pipe;
14015 }
14016
14017 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14018                                       struct drm_file *file)
14019 {
14020         struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14021         struct drm_crtc *drmmode_crtc;
14022         struct intel_crtc *crtc;
14023
14024         drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
14025         if (!drmmode_crtc)
14026                 return -ENOENT;
14027
14028         crtc = to_intel_crtc(drmmode_crtc);
14029         pipe_from_crtc_id->pipe = crtc->pipe;
14030
14031         return 0;
14032 }
14033
14034 static int intel_encoder_clones(struct intel_encoder *encoder)
14035 {
14036         struct drm_device *dev = encoder->base.dev;
14037         struct intel_encoder *source_encoder;
14038         int index_mask = 0;
14039         int entry = 0;
14040
14041         for_each_intel_encoder(dev, source_encoder) {
14042                 if (encoders_cloneable(encoder, source_encoder))
14043                         index_mask |= (1 << entry);
14044
14045                 entry++;
14046         }
14047
14048         return index_mask;
14049 }
14050
14051 static bool has_edp_a(struct drm_i915_private *dev_priv)
14052 {
14053         if (!IS_MOBILE(dev_priv))
14054                 return false;
14055
14056         if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14057                 return false;
14058
14059         if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14060                 return false;
14061
14062         return true;
14063 }
14064
14065 static bool intel_crt_present(struct drm_i915_private *dev_priv)
14066 {
14067         if (INTEL_GEN(dev_priv) >= 9)
14068                 return false;
14069
14070         if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
14071                 return false;
14072
14073         if (IS_CHERRYVIEW(dev_priv))
14074                 return false;
14075
14076         if (HAS_PCH_LPT_H(dev_priv) &&
14077             I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14078                 return false;
14079
14080         /* DDI E can't be used if DDI A requires 4 lanes */
14081         if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14082                 return false;
14083
14084         if (!dev_priv->vbt.int_crt_support)
14085                 return false;
14086
14087         return true;
14088 }
14089
14090 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14091 {
14092         int pps_num;
14093         int pps_idx;
14094
14095         if (HAS_DDI(dev_priv))
14096                 return;
14097         /*
14098          * This w/a is needed at least on CPT/PPT, but to be sure apply it
14099          * everywhere where registers can be write protected.
14100          */
14101         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14102                 pps_num = 2;
14103         else
14104                 pps_num = 1;
14105
14106         for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
14107                 u32 val = I915_READ(PP_CONTROL(pps_idx));
14108
14109                 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
14110                 I915_WRITE(PP_CONTROL(pps_idx), val);
14111         }
14112 }
14113
/*
 * Select the MMIO base of the panel power sequencer registers for this
 * platform, then apply the register-unlock workaround.
 */
static void intel_pps_init(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
		dev_priv->pps_mmio_base = PCH_PPS_BASE;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->pps_mmio_base = VLV_PPS_BASE;
	else
		dev_priv->pps_mmio_base = PPS_BASE;

	intel_pps_unlock_regs_wa(dev_priv);
}
14125
/*
 * Probe and register every display output (encoder/connector) present
 * on this platform: LVDS, CRT, DDI, PCH/gmch DP/HDMI/SDVO, DSI, DVO
 * and TV, as applicable. Finally compute the possible_crtcs and
 * possible_clones masks for all registered encoders.
 *
 * The probe order within each platform branch matters: some straps are
 * shared between outputs (e.g. SDVO/HDMI, DP/eDP), so detection of one
 * output gates registration of another.
 */
static void intel_setup_outputs(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_pps_init(dev_priv);

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registeration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev_priv);

	if (intel_crt_present(dev_priv))
		intel_crt_init(dev_priv);

	if (IS_ICELAKE(dev_priv)) {
		/* ICL: register all six DDIs unconditionally. */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		intel_ddi_init(dev_priv, PORT_D);
		intel_ddi_init(dev_priv, PORT_E);
		intel_ddi_init(dev_priv, PORT_F);
	} else if (IS_GEN9_LP(dev_priv)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);

		vlv_dsi_init(dev_priv);
	} else if (HAS_DDI(dev_priv)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_GEN9_BC(dev_priv))
			intel_ddi_init(dev_priv, PORT_A);

		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev_priv, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev_priv, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev_priv, PORT_D);
		if (found & SFUSE_STRAP_DDIF_DETECTED)
			intel_ddi_init(dev_priv, PORT_F);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if (IS_GEN9_BC(dev_priv) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev_priv, PORT_E);

	} else if (HAS_PCH_SPLIT(dev_priv)) {
		int found;
		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);

		if (has_edp_a(dev_priv))
			intel_dp_init(dev_priv, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);

		/* Port D is HDMI only when it isn't strapped as eDP. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev_priv)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
		}

		vlv_dsi_init(dev_priv);
	} else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev_priv)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev_priv))
				intel_dp_init(dev_priv, DP_C, PORT_C);
		}

		if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev_priv, DP_D, PORT_D);
	} else if (IS_GEN2(dev_priv))
		intel_dvo_init(dev_priv);

	if (SUPPORTS_TV(dev_priv))
		intel_tv_init(dev_priv);

	intel_psr_init(dev_priv);

	/* Now that all encoders exist, fill in their clone/crtc masks. */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev_priv);

	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
14319
/*
 * Tear down a user framebuffer: unregister it from the DRM core, drop
 * the backing object's framebuffer reference count and our own object
 * reference, then free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	drm_framebuffer_cleanup(fb);

	/* Underflow here would indicate an unbalanced fb ref. */
	i915_gem_object_lock(obj);
	WARN_ON(!obj->framebuffer_references--);
	i915_gem_object_unlock(obj);

	i915_gem_object_put(obj);

	kfree(intel_fb);
}
14335
/*
 * Create a GEM handle for the framebuffer's backing object on behalf of
 * @file. Objects backed by a userptr mapping are refused.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}
14349
/*
 * DIRTYFB ioctl handler: flush any CPU-side rendering on the backing
 * object so it becomes visible on the display, and notify frontbuffer
 * tracking. The clip rectangles are accepted but not used; the whole
 * object is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	return 0;
}
14363
/* Framebuffer vfuncs for userspace-created (addfb) framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14369
14370 static
14371 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
14372                          uint64_t fb_modifier, uint32_t pixel_format)
14373 {
14374         u32 gen = INTEL_GEN(dev_priv);
14375
14376         if (gen >= 9) {
14377                 int cpp = drm_format_plane_cpp(pixel_format, 0);
14378
14379                 /* "The stride in bytes must not exceed the of the size of 8K
14380                  *  pixels and 32K bytes."
14381                  */
14382                 return min(8192 * cpp, 32768);
14383         } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
14384                 return 32*1024;
14385         } else if (gen >= 4) {
14386                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14387                         return 16*1024;
14388                 else
14389                         return 32*1024;
14390         } else if (gen >= 3) {
14391                 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14392                         return 8*1024;
14393                 else
14394                         return 16*1024;
14395         } else {
14396                 /* XXX DSPC is limited to 4k tiled */
14397                 return 8*1024;
14398         }
14399 }
14400
14401 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14402                                   struct drm_i915_gem_object *obj,
14403                                   struct drm_mode_fb_cmd2 *mode_cmd)
14404 {
14405         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
14406         struct drm_framebuffer *fb = &intel_fb->base;
14407         struct drm_format_name_buf format_name;
14408         u32 pitch_limit;
14409         unsigned int tiling, stride;
14410         int ret = -EINVAL;
14411         int i;
14412
14413         i915_gem_object_lock(obj);
14414         obj->framebuffer_references++;
14415         tiling = i915_gem_object_get_tiling(obj);
14416         stride = i915_gem_object_get_stride(obj);
14417         i915_gem_object_unlock(obj);
14418
14419         if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14420                 /*
14421                  * If there's a fence, enforce that
14422                  * the fb modifier and tiling mode match.
14423                  */
14424                 if (tiling != I915_TILING_NONE &&
14425                     tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14426                         DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
14427                         goto err;
14428                 }
14429         } else {
14430                 if (tiling == I915_TILING_X) {
14431                         mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14432                 } else if (tiling == I915_TILING_Y) {
14433                         DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
14434                         goto err;
14435                 }
14436         }
14437
14438         /* Passed in modifier sanity checking. */
14439         switch (mode_cmd->modifier[0]) {
14440         case I915_FORMAT_MOD_Y_TILED_CCS:
14441         case I915_FORMAT_MOD_Yf_TILED_CCS:
14442                 switch (mode_cmd->pixel_format) {
14443                 case DRM_FORMAT_XBGR8888:
14444                 case DRM_FORMAT_ABGR8888:
14445                 case DRM_FORMAT_XRGB8888:
14446                 case DRM_FORMAT_ARGB8888:
14447                         break;
14448                 default:
14449                         DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
14450                         goto err;
14451                 }
14452                 /* fall through */
14453         case I915_FORMAT_MOD_Y_TILED:
14454         case I915_FORMAT_MOD_Yf_TILED:
14455                 if (INTEL_GEN(dev_priv) < 9) {
14456                         DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
14457                                       mode_cmd->modifier[0]);
14458                         goto err;
14459                 }
14460         case DRM_FORMAT_MOD_LINEAR:
14461         case I915_FORMAT_MOD_X_TILED:
14462                 break;
14463         default:
14464                 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
14465                               mode_cmd->modifier[0]);
14466                 goto err;
14467         }
14468
14469         /*
14470          * gen2/3 display engine uses the fence if present,
14471          * so the tiling mode must match the fb modifier exactly.
14472          */
14473         if (INTEL_GEN(dev_priv) < 4 &&
14474             tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
14475                 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
14476                 goto err;
14477         }
14478
14479         pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
14480                                            mode_cmd->pixel_format);
14481         if (mode_cmd->pitches[0] > pitch_limit) {
14482                 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
14483                               mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
14484                               "tiled" : "linear",
14485                               mode_cmd->pitches[0], pitch_limit);
14486                 goto err;
14487         }
14488
14489         /*
14490          * If there's a fence, enforce that
14491          * the fb pitch and fence stride match.
14492          */
14493         if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
14494                 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
14495                               mode_cmd->pitches[0], stride);
14496                 goto err;
14497         }
14498
14499         /* Reject formats not supported by any plane early. */
14500         switch (mode_cmd->pixel_format) {
14501         case DRM_FORMAT_C8:
14502         case DRM_FORMAT_RGB565:
14503         case DRM_FORMAT_XRGB8888:
14504         case DRM_FORMAT_ARGB8888:
14505                 break;
14506         case DRM_FORMAT_XRGB1555:
14507                 if (INTEL_GEN(dev_priv) > 3) {
14508                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14509                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14510                         goto err;
14511                 }
14512                 break;
14513         case DRM_FORMAT_ABGR8888:
14514                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
14515                     INTEL_GEN(dev_priv) < 9) {
14516                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14517                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14518                         goto err;
14519                 }
14520                 break;
14521         case DRM_FORMAT_XBGR8888:
14522         case DRM_FORMAT_XRGB2101010:
14523         case DRM_FORMAT_XBGR2101010:
14524                 if (INTEL_GEN(dev_priv) < 4) {
14525                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14526                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14527                         goto err;
14528                 }
14529                 break;
14530         case DRM_FORMAT_ABGR2101010:
14531                 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
14532                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14533                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14534                         goto err;
14535                 }
14536                 break;
14537         case DRM_FORMAT_YUYV:
14538         case DRM_FORMAT_UYVY:
14539         case DRM_FORMAT_YVYU:
14540         case DRM_FORMAT_VYUY:
14541                 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
14542                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14543                                       drm_get_format_name(mode_cmd->pixel_format, &format_name));
14544                         goto err;
14545                 }
14546                 break;
14547         case DRM_FORMAT_NV12:
14548                 if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
14549                     IS_BROXTON(dev_priv)) {
14550                         DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14551                                       drm_get_format_name(mode_cmd->pixel_format,
14552                                                           &format_name));
14553                         goto err;
14554                 }
14555                 break;
14556         default:
14557                 DRM_DEBUG_KMS("unsupported pixel format: %s\n",
14558                               drm_get_format_name(mode_cmd->pixel_format, &format_name));
14559                 goto err;
14560         }
14561
14562         /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14563         if (mode_cmd->offsets[0] != 0)
14564                 goto err;
14565
14566         drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
14567
14568         if (fb->format->format == DRM_FORMAT_NV12 &&
14569             (fb->width < SKL_MIN_YUV_420_SRC_W ||
14570              fb->height < SKL_MIN_YUV_420_SRC_H ||
14571              (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
14572                 DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
14573                 return -EINVAL;
14574         }
14575
14576         for (i = 0; i < fb->format->num_planes; i++) {
14577                 u32 stride_alignment;
14578
14579                 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
14580                         DRM_DEBUG_KMS("bad plane %d handle\n", i);
14581                         goto err;
14582                 }
14583
14584                 stride_alignment = intel_fb_stride_alignment(fb, i);
14585
14586                 /*
14587                  * Display WA #0531: skl,bxt,kbl,glk
14588                  *
14589                  * Render decompression and plane width > 3840
14590                  * combined with horizontal panning requires the
14591                  * plane stride to be a multiple of 4. We'll just
14592                  * require the entire fb to accommodate that to avoid
14593                  * potential runtime errors at plane configuration time.
14594                  */
14595                 if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
14596                     (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
14597                      fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS))
14598                         stride_alignment *= 4;
14599
14600                 if (fb->pitches[i] & (stride_alignment - 1)) {
14601                         DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
14602                                       i, fb->pitches[i], stride_alignment);
14603                         goto err;
14604                 }
14605
14606                 fb->obj[i] = &obj->base;
14607         }
14608
14609         ret = intel_fill_fb_info(dev_priv, fb);
14610         if (ret)
14611                 goto err;
14612
14613         ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
14614         if (ret) {
14615                 DRM_ERROR("framebuffer init failed %d\n", ret);
14616                 goto err;
14617         }
14618
14619         return 0;
14620
14621 err:
14622         i915_gem_object_lock(obj);
14623         obj->framebuffer_references--;
14624         i915_gem_object_unlock(obj);
14625         return ret;
14626 }
14627
/*
 * .fb_create hook: look up the GEM object for handles[0] and wrap it in
 * an intel framebuffer. On failure the lookup reference is dropped and
 * an ERR_PTR is returned.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	/* Local copy: intel_framebuffer_create() may fill in modifier[0]. */
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
	if (!obj)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);

	return fb;
}
14647
/*
 * .atomic_state_free hook: release the default state contents, tear down
 * the commit_ready fence embedded in the i915 state, and free the
 * allocation.
 */
static void intel_atomic_state_free(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);

	drm_atomic_state_default_release(state);

	i915_sw_fence_fini(&intel_state->commit_ready);

	kfree(state);
}
14658
14659 static enum drm_mode_status
14660 intel_mode_valid(struct drm_device *dev,
14661                  const struct drm_display_mode *mode)
14662 {
14663         struct drm_i915_private *dev_priv = to_i915(dev);
14664         int hdisplay_max, htotal_max;
14665         int vdisplay_max, vtotal_max;
14666
14667         /*
14668          * Can't reject DBLSCAN here because Xorg ddxen can add piles
14669          * of DBLSCAN modes to the output's mode list when they detect
14670          * the scaling mode property on the connector. And they don't
14671          * ask the kernel to validate those modes in any way until
14672          * modeset time at which point the client gets a protocol error.
14673          * So in order to not upset those clients we silently ignore the
14674          * DBLSCAN flag on such connectors. For other connectors we will
14675          * reject modes with the DBLSCAN flag in encoder->compute_config().
14676          * And we always reject DBLSCAN modes in connector->mode_valid()
14677          * as we never want such modes on the connector's mode list.
14678          */
14679
14680         if (mode->vscan > 1)
14681                 return MODE_NO_VSCAN;
14682
14683         if (mode->flags & DRM_MODE_FLAG_HSKEW)
14684                 return MODE_H_ILLEGAL;
14685
14686         if (mode->flags & (DRM_MODE_FLAG_CSYNC |
14687                            DRM_MODE_FLAG_NCSYNC |
14688                            DRM_MODE_FLAG_PCSYNC))
14689                 return MODE_HSYNC;
14690
14691         if (mode->flags & (DRM_MODE_FLAG_BCAST |
14692                            DRM_MODE_FLAG_PIXMUX |
14693                            DRM_MODE_FLAG_CLKDIV2))
14694                 return MODE_BAD;
14695
14696         if (INTEL_GEN(dev_priv) >= 9 ||
14697             IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
14698                 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
14699                 vdisplay_max = 4096;
14700                 htotal_max = 8192;
14701                 vtotal_max = 8192;
14702         } else if (INTEL_GEN(dev_priv) >= 3) {
14703                 hdisplay_max = 4096;
14704                 vdisplay_max = 4096;
14705                 htotal_max = 8192;
14706                 vtotal_max = 8192;
14707         } else {
14708                 hdisplay_max = 2048;
14709                 vdisplay_max = 2048;
14710                 htotal_max = 4096;
14711                 vtotal_max = 4096;
14712         }
14713
14714         if (mode->hdisplay > hdisplay_max ||
14715             mode->hsync_start > htotal_max ||
14716             mode->hsync_end > htotal_max ||
14717             mode->htotal > htotal_max)
14718                 return MODE_H_ILLEGAL;
14719
14720         if (mode->vdisplay > vdisplay_max ||
14721             mode->vsync_start > vtotal_max ||
14722             mode->vsync_end > vtotal_max ||
14723             mode->vtotal > vtotal_max)
14724                 return MODE_V_ILLEGAL;
14725
14726         return MODE_OK;
14727 }
14728
/* Top-level mode-config vfuncs wiring the atomic modeset implementation. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.get_format_info = intel_get_format_info,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.mode_valid = intel_mode_valid,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};
14740
14741 /**
14742  * intel_init_display_hooks - initialize the display modesetting hooks
14743  * @dev_priv: device private
14744  */
14745 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14746 {
14747         intel_init_cdclk_hooks(dev_priv);
14748
14749         if (INTEL_GEN(dev_priv) >= 9) {
14750                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14751                 dev_priv->display.get_initial_plane_config =
14752                         skylake_get_initial_plane_config;
14753                 dev_priv->display.crtc_compute_clock =
14754                         haswell_crtc_compute_clock;
14755                 dev_priv->display.crtc_enable = haswell_crtc_enable;
14756                 dev_priv->display.crtc_disable = haswell_crtc_disable;
14757         } else if (HAS_DDI(dev_priv)) {
14758                 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14759                 dev_priv->display.get_initial_plane_config =
14760                         i9xx_get_initial_plane_config;
14761                 dev_priv->display.crtc_compute_clock =
14762                         haswell_crtc_compute_clock;
14763                 dev_priv->display.crtc_enable = haswell_crtc_enable;
14764                 dev_priv->display.crtc_disable = haswell_crtc_disable;
14765         } else if (HAS_PCH_SPLIT(dev_priv)) {
14766                 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14767                 dev_priv->display.get_initial_plane_config =
14768                         i9xx_get_initial_plane_config;
14769                 dev_priv->display.crtc_compute_clock =
14770                         ironlake_crtc_compute_clock;
14771                 dev_priv->display.crtc_enable = ironlake_crtc_enable;
14772                 dev_priv->display.crtc_disable = ironlake_crtc_disable;
14773         } else if (IS_CHERRYVIEW(dev_priv)) {
14774                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14775                 dev_priv->display.get_initial_plane_config =
14776                         i9xx_get_initial_plane_config;
14777                 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14778                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14779                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14780         } else if (IS_VALLEYVIEW(dev_priv)) {
14781                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14782                 dev_priv->display.get_initial_plane_config =
14783                         i9xx_get_initial_plane_config;
14784                 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
14785                 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14786                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14787         } else if (IS_G4X(dev_priv)) {
14788                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14789                 dev_priv->display.get_initial_plane_config =
14790                         i9xx_get_initial_plane_config;
14791                 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
14792                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14793                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14794         } else if (IS_PINEVIEW(dev_priv)) {
14795                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14796                 dev_priv->display.get_initial_plane_config =
14797                         i9xx_get_initial_plane_config;
14798                 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
14799                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14800                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14801         } else if (!IS_GEN2(dev_priv)) {
14802                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14803                 dev_priv->display.get_initial_plane_config =
14804                         i9xx_get_initial_plane_config;
14805                 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14806                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14807                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14808         } else {
14809                 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14810                 dev_priv->display.get_initial_plane_config =
14811                         i9xx_get_initial_plane_config;
14812                 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
14813                 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14814                 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14815         }
14816
14817         if (IS_GEN5(dev_priv)) {
14818                 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14819         } else if (IS_GEN6(dev_priv)) {
14820                 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14821         } else if (IS_IVYBRIDGE(dev_priv)) {
14822                 /* FIXME: detect B0+ stepping and use auto training */
14823                 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
14824         } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
14825                 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14826         }
14827
14828         if (INTEL_GEN(dev_priv) >= 9)
14829                 dev_priv->display.update_crtcs = skl_update_crtcs;
14830         else
14831                 dev_priv->display.update_crtcs = intel_update_crtcs;
14832 }
14833
14834 /*
14835  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14836  */
14837 static void quirk_ssc_force_disable(struct drm_device *dev)
14838 {
14839         struct drm_i915_private *dev_priv = to_i915(dev);
14840         dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14841         DRM_INFO("applying lvds SSC disable quirk\n");
14842 }
14843
14844 /*
14845  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14846  * brightness value
14847  */
14848 static void quirk_invert_brightness(struct drm_device *dev)
14849 {
14850         struct drm_i915_private *dev_priv = to_i915(dev);
14851         dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14852         DRM_INFO("applying inverted panel brightness quirk\n");
14853 }
14854
14855 /* Some VBT's incorrectly indicate no backlight is present */
14856 static void quirk_backlight_present(struct drm_device *dev)
14857 {
14858         struct drm_i915_private *dev_priv = to_i915(dev);
14859         dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14860         DRM_INFO("applying backlight present quirk\n");
14861 }
14862
14863 /* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
14864  * which is 300 ms greater than eDP spec T12 min.
14865  */
14866 static void quirk_increase_t12_delay(struct drm_device *dev)
14867 {
14868         struct drm_i915_private *dev_priv = to_i915(dev);
14869
14870         dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
14871         DRM_INFO("Applying T12 delay quirk\n");
14872 }
14873
14874 /*
14875  * GeminiLake NUC HDMI outputs require additional off time
14876  * this allows the onboard retimer to correctly sync to signal
14877  */
14878 static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
14879 {
14880         struct drm_i915_private *dev_priv = to_i915(dev);
14881
14882         dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
14883         DRM_INFO("Applying Increase DDI Disabled quirk\n");
14884 }
14885
/* PCI-id keyed quirk table entry; matched in intel_init_quirks(). */
struct intel_quirk {
	int device;			/* PCI device id */
	int subsystem_vendor;		/* PCI subsystem vendor, or PCI_ANY_ID */
	int subsystem_device;		/* PCI subsystem device, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied on match */
};
14892
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* applied on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* terminated match list */
};
14898
/* DMI match callback: log the match; returning 1 stops further matching. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
14904
/* DMI-matched quirks, checked by intel_init_quirks(). */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
14920
/*
 * PCI device/subsystem-id keyed quirk table. Matching (including
 * PCI_ANY_ID wildcards) is done in intel_init_quirks().
 */
static struct intel_quirk intel_quirks[] = {
	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },

	/* Toshiba Satellite P50-C-18C */
	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },

	/* GeminiLake NUC */
	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
	/* ASRock ITX */
	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
14980
14981 static void intel_init_quirks(struct drm_device *dev)
14982 {
14983         struct pci_dev *d = dev->pdev;
14984         int i;
14985
14986         for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14987                 struct intel_quirk *q = &intel_quirks[i];
14988
14989                 if (d->device == q->device &&
14990                     (d->subsystem_vendor == q->subsystem_vendor ||
14991                      q->subsystem_vendor == PCI_ANY_ID) &&
14992                     (d->subsystem_device == q->subsystem_device ||
14993                      q->subsystem_device == PCI_ANY_ID))
14994                         q->hook(dev);
14995         }
14996         for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
14997                 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
14998                         intel_dmi_quirks[i].hook(dev);
14999         }
15000 }
15001
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
	/* Read-modify-write SR01 with bit 5 set (VGA screen off)
	 * while holding the legacy VGA I/O resource. */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	/* Then disable the VGA plane via the platform's VGACNTRL register. */
	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15020
/*
 * Read out the current cdclk hardware state and seed the logical and
 * actual software cdclk state from it.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
	dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
}
15029
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct intel_atomic_state *intel_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		/* Lock contention: drop everything and retry acquisition. */
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	intel_state = to_intel_atomic_state(state);

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		intel_state->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto put_state;
	}

	/* Write calculated watermark values back */
	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(intel_state, cs);

		/* Copy the computed watermarks into the committed state. */
		to_intel_crtc_state(crtc->state)->wm = cs->wm;
	}

put_state:
	drm_atomic_state_put(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15116
15117 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15118 {
15119         if (IS_GEN5(dev_priv)) {
15120                 u32 fdi_pll_clk =
15121                         I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15122
15123                 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
15124         } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
15125                 dev_priv->fdi_pll_freq = 270000;
15126         } else {
15127                 return;
15128         }
15129
15130         DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15131 }
15132
/*
 * One-time modeset initialization: set up DRM mode_config limits,
 * create CRTCs/encoders/connectors, take over the state left by the
 * BIOS and sanitize it. Returns 0 on success or a negative errno.
 */
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;

	/* Ordered wq: modeset commits must execute in submission order. */
	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	init_llist_head(&dev_priv->atomic_helper.free_list);
	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state_worker);

	/* Quirks may influence the PM setup below, so apply them first. */
	intel_init_quirks(dev);

	intel_init_pm(dev_priv);

	/* Display-less SKU: nothing further to initialize. */
	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return 0;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				     bios_lvds_use_ssc ? "en" : "dis",
				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* maximum framebuffer dimensions */
	if (IS_GEN2(dev_priv)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev_priv)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Per-platform cursor size limits. */
	if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
		dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev_priv)) {
		dev->mode_config.cursor_width = 64;
		dev->mode_config.cursor_height = 64;
	} else {
		dev->mode_config.cursor_width = 256;
		dev->mode_config.cursor_height = 256;
	}

	dev->mode_config.fb_base = ggtt->gmadr.start;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		int ret;

		ret = intel_crtc_init(dev_priv, pipe);
		if (ret) {
			/* Tears down the CRTCs created so far as well. */
			drm_mode_config_cleanup(dev);
			return ret;
		}
	}

	intel_shared_dpll_init(dev);
	intel_update_fdi_pll_freq(dev_priv);

	intel_update_czclk(dev_priv);
	intel_modeset_init_hw(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev_priv);

	/* Just disable it once at startup */
	i915_disable_vga(dev_priv);
	intel_setup_outputs(dev_priv);

	/* Read out and sanitize the state the BIOS left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	if (!HAS_GMCH_DISPLAY(dev_priv))
		sanitize_watermarks(dev);

	return 0;
}
15272
/*
 * Force-enable a pipe with a fixed 640x480@60 timing. Used by the i830
 * "both pipes always on" quirk handling; programs the DPLL, pipe timings
 * and PIPECONF directly, bypassing the atomic machinery.
 */
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	/* 640x480@60Hz, ~25175 kHz */
	struct dpll clock = {
		.m1 = 18,
		.m2 = 7,
		.p1 = 13,
		.p2 = 4,
		.n = 2,
	};
	u32 dpll, fp;
	int i;

	/* Sanity check: a 48 MHz refclk with these dividers gives 25154 kHz. */
	WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);

	DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
		      pipe_name(pipe), clock.vco, clock.dot);

	fp = i9xx_dpll_compute_fp(&clock);
	/* Preserve only the DVO 2X bit from the current DPLL value. */
	dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
		DPLL_VGA_MODE_DIS |
		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
		PLL_P2_DIVIDE_BY_4 |
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	I915_WRITE(FP0(pipe), fp);
	I915_WRITE(FP1(pipe), fp);

	/* Standard VESA 640x480@60 timings (values are end-exclusive - 1). */
	I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
	I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
	I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);

	/* We do this three times for luck */
	for (i = 0; i < 3 ; i++) {
		I915_WRITE(DPLL(pipe), dpll);
		POSTING_READ(DPLL(pipe));
		udelay(150); /* wait for warmup */
	}

	I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
	POSTING_READ(PIPECONF(pipe));

	/* Confirm the pipe is actually running before returning. */
	intel_wait_for_pipe_scanline_moving(crtc);
}
15342
/*
 * Counterpart to i830_enable_pipe(): shut the force-enabled pipe down.
 * All planes and cursors must already be disabled by the caller.
 */
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

	DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
		      pipe_name(pipe));

	/* Sanity check: no plane or cursor may still be scanning out. */
	WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
	WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
	WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);

	I915_WRITE(PIPECONF(pipe), 0);
	POSTING_READ(PIPECONF(pipe));

	/* Wait for the scanline to stop before touching the DPLL. */
	intel_wait_for_pipe_scanline_stopped(crtc);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
15364
15365 static bool intel_plane_mapping_ok(struct intel_crtc *crtc,
15366                                    struct intel_plane *plane)
15367 {
15368         enum pipe pipe;
15369
15370         if (!plane->get_hw_state(plane, &pipe))
15371                 return true;
15372
15373         return pipe == crtc->pipe;
15374 }
15375
15376 static void
15377 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
15378 {
15379         struct intel_crtc *crtc;
15380
15381         if (INTEL_GEN(dev_priv) >= 4)
15382                 return;
15383
15384         for_each_intel_crtc(&dev_priv->drm, crtc) {
15385                 struct intel_plane *plane =
15386                         to_intel_plane(crtc->base.primary);
15387
15388                 if (intel_plane_mapping_ok(crtc, plane))
15389                         continue;
15390
15391                 DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
15392                               plane->base.name);
15393                 intel_plane_disable_noatomic(crtc, plane);
15394         }
15395 }
15396
15397 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15398 {
15399         struct drm_device *dev = crtc->base.dev;
15400         struct intel_encoder *encoder;
15401
15402         for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15403                 return true;
15404
15405         return false;
15406 }
15407
15408 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
15409 {
15410         struct drm_device *dev = encoder->base.dev;
15411         struct intel_connector *connector;
15412
15413         for_each_connector_on_encoder(dev, &encoder->base, connector)
15414                 return connector;
15415
15416         return NULL;
15417 }
15418
15419 static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
15420                               enum pipe pch_transcoder)
15421 {
15422         return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
15423                 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
15424 }
15425
/*
 * Bring a single crtc's software/hardware state into a consistent,
 * trustworthy shape after hardware readout (boot or resume).
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * The primary plane may still show the BIOS fb,
			 * so only visible non-primary planes are shut off.
			 */
			if (plane_state->base.visible &&
			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
				intel_plane_disable_noatomic(crtc, plane);
		}
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH trancoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}
15492
/*
 * Sanitize one encoder after hardware readout: an encoder whose
 * connector is active but whose pipe isn't is manually disabled, and
 * its software links are clamped to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			struct drm_crtc_state *crtc_state = encoder->base.crtc->state;

			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
			if (encoder->post_disable)
				encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */

		connector->base.dpms = DRM_MODE_DPMS_OFF;
		connector->base.encoder = NULL;
	}

	/* notify opregion of the sanitized encoder state */
	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
}
15536
15537 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
15538 {
15539         i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
15540
15541         if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15542                 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15543                 i915_disable_vga(dev_priv);
15544         }
15545 }
15546
/*
 * Power-domain-safe wrapper around i915_redisable_vga_power_on().
 */
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev_priv);

	/* Drop the reference taken by the get_if_enabled above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15563
15564 /* FIXME read out full plane state for all planes */
15565 static void readout_plane_state(struct intel_crtc *crtc)
15566 {
15567         struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
15568         struct intel_crtc_state *crtc_state =
15569                 to_intel_crtc_state(crtc->base.state);
15570         struct intel_plane *plane;
15571
15572         for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
15573                 struct intel_plane_state *plane_state =
15574                         to_intel_plane_state(plane->base.state);
15575                 enum pipe pipe;
15576                 bool visible;
15577
15578                 visible = plane->get_hw_state(plane, &pipe);
15579
15580                 intel_set_plane_visible(crtc_state, plane_state, visible);
15581         }
15582 }
15583
/*
 * Reconstruct the full software modeset state (crtcs, shared DPLLs,
 * encoders, connectors and derived crtc data) from the hardware
 * registers, discarding whatever stale software state existed before.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int i;

	dev_priv->active_crtcs = 0;

	/* Pass 1: read out per-crtc pipe config and plane visibility. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Throw away the stale state, keep the allocation. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active)
			dev_priv->active_crtcs |= 1 << crtc->pipe;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      enableddisabled(crtc_state->base.active));
	}

	/* Pass 2: read out shared DPLL state and rebuild crtc masks. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
							&pll->state.hw_state);
		pll->state.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			struct intel_crtc_state *crtc_state =
				to_intel_crtc_state(crtc->base.state);

			if (crtc_state->base.active &&
			    crtc_state->shared_dpll == pll)
				pll->state.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->state.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->info->name, pll->state.crtc_mask, pll->on);
	}

	/* Pass 3: link each active encoder to the crtc driving it. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc_state);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id, encoder->base.name,
			      enableddisabled(encoder->base.crtc),
			      pipe_name(pipe));
	}

	/* Pass 4: link connectors to encoders and rebuild crtc masks. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					drm_connector_mask(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id, connector->base.name,
			      enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Pass 5: derive modes, pixel rate, cdclk/voltage bookkeeping. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		int min_cdclk = 0;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc_state->base.active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
			crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
			crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
			intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;

			intel_crtc_compute_pixel_rate(crtc_state);

			if (dev_priv->display.modeset_calc_cdclk) {
				min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
				if (WARN_ON(min_cdclk < 0))
					min_cdclk = 0;
			}

			drm_calc_timestamping_constants(&crtc->base,
							&crtc_state->base.adjusted_mode);
			update_scanline_offset(crtc);
		}

		dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
		dev_priv->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_pipe_config_sanity_check(dev_priv, crtc_state);
	}
}
15736
15737 static void
15738 get_encoder_power_domains(struct drm_i915_private *dev_priv)
15739 {
15740         struct intel_encoder *encoder;
15741
15742         for_each_intel_encoder(&dev_priv->drm, encoder) {
15743                 u64 get_domains;
15744                 enum intel_display_power_domain domain;
15745                 struct intel_crtc_state *crtc_state;
15746
15747                 if (!encoder->get_power_domains)
15748                         continue;
15749
15750                 /*
15751                  * MST-primary and inactive encoders don't have a crtc state
15752                  * and neither of these require any power domain references.
15753                  */
15754                 if (!encoder->base.crtc)
15755                         continue;
15756
15757                 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
15758                 get_domains = encoder->get_power_domains(encoder, crtc_state);
15759                 for_each_power_domain(domain, get_domains)
15760                         intel_display_power_get(dev_priv, domain);
15761         }
15762 }
15763
/*
 * Apply display workarounds that must be in effect before the rest of the
 * display HW state is read out and sanitized (in particular, before any
 * planes are disabled).
 */
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
	if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		I915_WRITE(CHICKEN_PAR1_1,
			   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}
}
15780
/*
 * Scan out the current hw modeset state, and sanitize it to the current
 * state: read what the hardware is actually doing (e.g. as left by the
 * BIOS), then fix up anything inconsistent so the atomic state matches.
 * The steps below are order-dependent.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	/* Workarounds first, then read out what the HW is really doing. */
	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	get_encoder_power_domains(dev_priv);

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is on but no longer used by a crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
			      pll->info->name);

		pll->info->funcs->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out (and, where supported, sanitize) watermark state. */
	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev);
		vlv_wm_sanitize(dev_priv);
	} else if (INTEL_GEN(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev);
	}

	/*
	 * Each active crtc now takes its own power domain references;
	 * a non-zero put_domains here would indicate a refcount imbalance
	 * from readout, hence the WARN_ON.
	 */
	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	/* Drop the init power now that per-crtc references are in place. */
	intel_display_set_init_power(dev_priv, false);

	intel_power_domains_verify_state(dev_priv);

	intel_fbc_init_pipe_state(dev_priv);
}
15854
/*
 * Restore the display state saved at suspend time (modeset_restore_state),
 * taking all modeset locks with deadlock backoff. Logs an error on failure
 * and always releases the saved state.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* Claim the saved state; only storing &ctx here, init follows below. */
	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	/* Standard w/w locking dance: back off and retry on deadlock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	/* Drop our reference on the saved state whether or not restore worked. */
	if (state)
		drm_atomic_state_put(state);
}
15888
/*
 * Late connector registration hook: register the backlight device for
 * this connector. Returns 0 on success or a negative errno from the
 * backlight registration.
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = intel_backlight_device_register(intel_connector);
	if (ret)
		return ret;

	return 0;
}
15903
/*
 * Early connector unregistration hook: tear down the backlight device
 * before destroying the panel backlight state it references.
 */
void intel_connector_unregister(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_backlight_device_unregister(intel_connector);
	intel_panel_destroy_backlight(connector);
}
15911
15912 static void intel_hpd_poll_fini(struct drm_device *dev)
15913 {
15914         struct intel_connector *connector;
15915         struct drm_connector_list_iter conn_iter;
15916
15917         /* Kill all the work that may have been queued by hpd. */
15918         drm_connector_list_iter_begin(dev, &conn_iter);
15919         for_each_intel_connector_iter(connector, &conn_iter) {
15920                 if (connector->modeset_retry_work.func)
15921                         cancel_work_sync(&connector->modeset_retry_work);
15922                 if (connector->hdcp_shim) {
15923                         cancel_delayed_work_sync(&connector->hdcp_check_work);
15924                         cancel_work_sync(&connector->hdcp_prop_work);
15925                 }
15926         }
15927         drm_connector_list_iter_end(&conn_iter);
15928 }
15929
/*
 * Tear down all modeset/display state. The ordering below is deliberate
 * and fragile: interrupts and polling must die first, fbdev before the
 * things the poll work can call into, and so on — see the comments.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Drain any commits still in flight on our modeset workqueue. */
	flush_workqueue(dev_priv->modeset_wq);

	/* All deferred atomic-state frees must have completed by now. */
	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(dev);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(dev_priv);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev_priv);

	/* Safe to destroy now that everything queued on it has been flushed. */
	destroy_workqueue(dev_priv->modeset_wq);
}
15974
/*
 * Link an intel_connector to its (sole) intel_encoder, both in our own
 * bookkeeping and in the drm core's connector<->encoder mapping.
 */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base, &encoder->base);
}
15981
15982 /*
15983  * set vga decode state - true == enable VGA decode
15984  */
15985 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
15986 {
15987         unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
15988         u16 gmch_ctrl;
15989
15990         if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15991                 DRM_ERROR("failed to read control word\n");
15992                 return -EIO;
15993         }
15994
15995         if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15996                 return 0;
15997
15998         if (state)
15999                 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16000         else
16001                 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16002
16003         if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16004                 DRM_ERROR("failed to write control word\n");
16005                 return -EIO;
16006         }
16007
16008         return 0;
16009 }
16010
16011 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16012
/*
 * Snapshot of display register state, captured on GPU error for later
 * dumping via intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW/BDW global power well driver control register. */
	u32 power_well_driver;

	/* Number of valid entries in transcoder[] (pipes, +1 for DDI eDP). */
	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* Registers below are only valid if this was set at capture. */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* Primary plane registers; which fields are valid depends on gen. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		/* Registers below are only valid if this was set at capture. */
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		/* Timing registers. */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
16056
16057 struct intel_display_error_state *
16058 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16059 {
16060         struct intel_display_error_state *error;
16061         int transcoders[] = {
16062                 TRANSCODER_A,
16063                 TRANSCODER_B,
16064                 TRANSCODER_C,
16065                 TRANSCODER_EDP,
16066         };
16067         int i;
16068
16069         if (INTEL_INFO(dev_priv)->num_pipes == 0)
16070                 return NULL;
16071
16072         error = kzalloc(sizeof(*error), GFP_ATOMIC);
16073         if (error == NULL)
16074                 return NULL;
16075
16076         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16077                 error->power_well_driver =
16078                         I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL));
16079
16080         for_each_pipe(dev_priv, i) {
16081                 error->pipe[i].power_domain_on =
16082                         __intel_display_power_is_enabled(dev_priv,
16083                                                          POWER_DOMAIN_PIPE(i));
16084                 if (!error->pipe[i].power_domain_on)
16085                         continue;
16086
16087                 error->cursor[i].control = I915_READ(CURCNTR(i));
16088                 error->cursor[i].position = I915_READ(CURPOS(i));
16089                 error->cursor[i].base = I915_READ(CURBASE(i));
16090
16091                 error->plane[i].control = I915_READ(DSPCNTR(i));
16092                 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16093                 if (INTEL_GEN(dev_priv) <= 3) {
16094                         error->plane[i].size = I915_READ(DSPSIZE(i));
16095                         error->plane[i].pos = I915_READ(DSPPOS(i));
16096                 }
16097                 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16098                         error->plane[i].addr = I915_READ(DSPADDR(i));
16099                 if (INTEL_GEN(dev_priv) >= 4) {
16100                         error->plane[i].surface = I915_READ(DSPSURF(i));
16101                         error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16102                 }
16103
16104                 error->pipe[i].source = I915_READ(PIPESRC(i));
16105
16106                 if (HAS_GMCH_DISPLAY(dev_priv))
16107                         error->pipe[i].stat = I915_READ(PIPESTAT(i));
16108         }
16109
16110         /* Note: this does not include DSI transcoders. */
16111         error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16112         if (HAS_DDI(dev_priv))
16113                 error->num_transcoders++; /* Account for eDP. */
16114
16115         for (i = 0; i < error->num_transcoders; i++) {
16116                 enum transcoder cpu_transcoder = transcoders[i];
16117
16118                 error->transcoder[i].power_domain_on =
16119                         __intel_display_power_is_enabled(dev_priv,
16120                                 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16121                 if (!error->transcoder[i].power_domain_on)
16122                         continue;
16123
16124                 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16125
16126                 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16127                 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16128                 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16129                 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16130                 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16131                 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16132                 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16133         }
16134
16135         return error;
16136 }
16137
16138 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16139
/*
 * Dump a previously captured intel_display_error_state into the error
 * state buffer. A NULL error is a no-op. Registers captured while their
 * power domain was off are printed as zero (never read from HW).
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		/* Some plane fields are only captured on certain gens. */
		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_GEN(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_GEN(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
16195
16196 #endif