drm/i915/fbc: Nuke bogus single pipe fbc1 restriction
[linux-block.git] / drivers / gpu / drm / i915 / display / intel_fbc.c
CommitLineData
7ff0ebcc
RV
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
94b83957
RV
24/**
25 * DOC: Frame Buffer Compression (FBC)
26 *
27 * FBC tries to save memory bandwidth (and so power consumption) by
28 * compressing the amount of memory used by the display. It is total
29 * transparent to user space and completely handled in the kernel.
7ff0ebcc
RV
30 *
31 * The benefits of FBC are mostly visible with solid backgrounds and
94b83957
RV
32 * variation-less patterns. It comes from keeping the memory footprint small
33 * and having fewer memory pages opened and accessed for refreshing the display.
7ff0ebcc 34 *
94b83957
RV
35 * i915 is responsible to reserve stolen memory for FBC and configure its
36 * offset on proper registers. The hardware takes care of all
37 * compress/decompress. However there are many known cases where we have to
38 * forcibly disable it to allow proper screen updates.
7ff0ebcc
RV
39 */
40
fcd70cd3
DV
41#include <drm/drm_fourcc.h>
42
94b83957 43#include "i915_drv.h"
1d455f8d 44#include "intel_display_types.h"
98afa316 45#include "intel_fbc.h"
55367a27 46#include "intel_frontbuffer.h"
94b83957 47
9f218336
PZ
/* Whether this device has FBC hardware at all (platform capability check). */
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return HAS_FBC(dev_priv);
}
52
2db3366b
PZ
/*
 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
{
	/* Difference between the cached plane y and the adjusted (hw-lied) y. */
	return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
}
65
c5ecd469
PZ
/*
 * For SKL+, the plane source size used by the hardware is based on the value we
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
 * we wrote to PIPESRC.
 */
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	/* Either out-pointer may be NULL when the caller needs only one axis. */
	if (width)
		*width = cache->plane.src_w;
	if (height)
		*height = cache->plane.src_h;
}
79
aaf78d27
PZ
/*
 * Compute the compressed framebuffer (CFB) size in bytes for the cached
 * plane state, clamping the line count to the per-generation hardware limit.
 */
static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
					struct intel_fbc_state_cache *cache)
{
	int lines;

	intel_fbc_get_plane_source_size(cache, NULL, &lines);
	/* Per-gen maximum number of lines the FBC hardware can track. */
	if (IS_GEN(dev_priv, 7))
		lines = min(lines, 2048);
	else if (INTEL_GEN(dev_priv) >= 8)
		lines = min(lines, 2560);

	/* Hardware needs the full buffer stride, not just the active area. */
	return lines * cache->fb.stride;
}
94
/* Turn off FBC on gen2/3 style hardware and wait for compression to idle. */
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
				    FBC_STAT_COMPRESSING, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}
114
/*
 * Program and enable FBC on gen2-4 hardware using the precomputed reg_params.
 * Writes the CFB pitch, clears the tag RAM and enables compression in
 * FBC_CONTROL, attaching the plane's fence for CPU write tracking.
 */
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN(dev_priv, 2))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN(dev_priv, 4)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->vma->fence->id;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}
157
/* Read back whether gen2-4 FBC is currently enabled in hardware. */
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
162
/*
 * Program and enable FBC on G4X (GM45) hardware: select the plane, the
 * compression limit based on bytes-per-pixel, and the CPU fence (if any).
 */
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
	/* 16bpp formats get a tighter compression limit. */
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}
184
/* Turn off FBC on G4X hardware if it is currently enabled. */
static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);
	}
}
196
/* Read back whether G4X FBC is currently enabled in hardware. */
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
201
d5ce4164
PZ
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	/* Posting read flushes the write before we continue. */
	POSTING_READ(MSG_FBC_REND_STATE);
}
208
/*
 * Program and enable FBC on ILK/SNB hardware: plane select, compression
 * threshold, fence setup (SNB uses an extra snoop register pair), render
 * tracking base address, then enable and force a recompression.
 */
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
	/* 16bpp needs one step tighter compression limit. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		if (IS_GEN(dev_priv, 5))
			dpfc_ctl |= params->vma->fence->id;
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->vma->fence->id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN(dev_priv, 6)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE,
		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}
258
/* Turn off FBC on ILK+ hardware if it is currently enabled. */
static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
	}
}
270
/* Read back whether ILK+ FBC is currently enabled in hardware. */
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
275
/*
 * Program and enable FBC on gen7+ (IVB and newer) hardware. Applies the
 * per-platform workarounds (stride override on gen9, FBC queue disable on
 * IVB/HSW/BDW, dummy pixel count on gen11+), sets the compression threshold
 * and fence, then enables compression and forces a recompression.
 */
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	/* Display WA #0529: skl, kbl, bxt. */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
		u32 val = I915_READ(CHICKEN_MISC_4);

		val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);

		/* Override the stride for non-X-tiled framebuffers. */
		if (i915_gem_object_get_tiling(params->vma->obj) !=
		    I915_TILING_X)
			val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;

		I915_WRITE(CHICKEN_MISC_4, val);
	}

	dpfc_ctl = 0;
	/* Only IVB still has a plane select field here. */
	if (IS_IVYBRIDGE(dev_priv))
		dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);

	/* 16bpp needs one step tighter compression limit. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->flags & PLANE_HAS_FENCE) {
		dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE |
			   params->vma->fence->id);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(SNB_DPFC_CTL_SA,0);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
	}

	/* Debug aid: paint compressed regions in false color. */
	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	if (IS_IVYBRIDGE(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
			   I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
			   HSW_FBCQ_DIS);
	}

	if (INTEL_GEN(dev_priv) >= 11)
		/* Wa_1409120013:icl,ehl,tgl */
		I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}
349
8c40074c
PZ
/* Dispatch "is FBC enabled in hardware?" to the per-generation helper. */
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 5)
		return ilk_fbc_is_active(dev_priv);
	else if (IS_GM45(dev_priv))
		return g4x_fbc_is_active(dev_priv);
	else
		return i8xx_fbc_is_active(dev_priv);
}
359
/* Mark FBC active in software state and program the per-gen hardware. */
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}
375
/* Mark FBC inactive in software state and disable it in hardware. */
static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = false;

	if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}
389
/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	/* Software-tracked state, not a hardware readback. */
	return dev_priv->fbc.active;
}
403
4a3d1e0f
CW
/*
 * Deactivate FBC in hardware (if active) and record @reason as the
 * no_fbc_reason for debugfs. Caller must hold fbc->lock.
 */
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
				 const char *reason)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);

	fbc->no_fbc_reason = reason;
}
416
/*
 * Allocate a CFB node from stolen memory, halving the requested size and
 * doubling the compression threshold until the allocation fits.
 *
 * Returns the compression threshold that worked (1, 2 or 4), or 0 on
 * failure. Note @size is first doubled to over-allocate (see below).
 */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	/* Gen4 and older can't raise the threshold, so give up right away. */
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}
465
/*
 * Allocate the compressed framebuffer (and, on old platforms, the line
 * length buffer) from stolen memory and program the base registers.
 *
 * Returns 0 on success, -ENOSPC if stolen memory could not be found.
 */
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = fbc->state_cache.fb.format->cpp[0];

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		/* Pre-G4X also needs a separate line length buffer (LLB). */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		/* Base registers are 32 bit, so the offsets must fit. */
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_fb.start,
					     U32_MAX));
		GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
					     fbc->compressed_llb->start,
					     U32_MAX));
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->dsm.start + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->dsm.start + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
530
/* Release the CFB (and LLB, if allocated) back to stolen memory. */
static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (drm_mm_node_allocated(&fbc->compressed_fb))
		i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);

	if (fbc->compressed_llb) {
		i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
		kfree(fbc->compressed_llb);
	}
}
543
/* Locked wrapper around __intel_fbc_cleanup_cfb(); no-op without FBC HW. */
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}
555
adf70c65
PZ
/* Check the framebuffer stride against the per-generation FBC limits. */
static bool stride_is_valid(struct drm_i915_private *dev_priv,
			    unsigned int stride)
{
	/* This should have been caught earlier. */
	if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
		return false;

	/* Below are the additional FBC restrictions. */
	if (stride < 512)
		return false;

	if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
		return stride == 4096 || stride == 8192;

	if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
		return false;

	if (stride > 16384)
		return false;

	return true;
}
578
/* Check whether the framebuffer pixel format is compressible on this HW. */
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
				  u32 pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
		return true;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_RGB565:
		/* 16bpp not supported on gen2 */
		if (IS_GEN(dev_priv, 2))
			return false;
		/* WaFbcOnly1to1Ratio:ctg */
		if (IS_G4X(dev_priv))
			return false;
		return true;
	default:
		return false;
	}
}
599
856312ae
PZ
/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	/* Per-generation maximum trackable plane dimensions. */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
		max_w = 5120;
		max_h = 4096;
	} else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	/* Account for the lied-about plane origin (see fence_y_offset). */
	effective_w += fbc->state_cache.plane.adjusted_x;
	effective_h += fbc->state_cache.plane.adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}
633
/*
 * Snapshot everything FBC needs from the atomic crtc/plane state into
 * fbc->state_cache, so later decisions don't have to touch atomic state.
 */
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->hw.fb;

	cache->vma = NULL;
	cache->flags = 0;

	cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->hw.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
	cache->plane.visible = plane_state->uapi.visible;
	cache->plane.adjusted_x = plane_state->color_plane[0].x;
	cache->plane.adjusted_y = plane_state->color_plane[0].y;
	cache->plane.y = plane_state->uapi.src.y1 >> 16;

	cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;

	/* Fb-dependent fields stay unset for an invisible plane. */
	if (!cache->plane.visible)
		return;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	cache->vma = plane_state->vma;
	cache->flags = plane_state->flags;
	/* A fence flag without an actual fence would confuse activation. */
	if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence))
		cache->flags &= ~PLANE_HAS_FENCE;
}
676
/*
 * Decide, from the cached state, whether FBC can be activated right now.
 * On rejection, records a human-readable reason in fbc->no_fbc_reason.
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* We don't need to use a state cache here since this information is
	 * global for all CRTC.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	if (!cache->vma) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constaints
	 * at the time of pinning.
	 *
	 * FIXME with 90/270 degree rotation we should use the fence on
	 * the normal GTT view (the rotated view doesn't even have a
	 * fence). Would need changes to the FBC fence Y offset as well.
	 * For now this will effecively disable FBC with 90/270 degree
	 * rotation.
	 */
	if (!(cache->flags & PLANE_HAS_FENCE)) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}
	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
	    cache->fb.format->has_alpha) {
		fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	/*
	 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
	 * having a Y offset that isn't divisible by 4 causes FIFO underrun
	 * and screen flicker.
	 */
	if (IS_GEN_RANGE(dev_priv, 9, 10) &&
	    (fbc->state_cache.plane.adjusted_y & 3)) {
		fbc->no_fbc_reason = "plane Y offset is misaligned";
		return false;
	}

	return true;
}
781
/*
 * Global (non-crtc-specific) checks for whether FBC may be enabled at all.
 * Records the rejection reason in fbc->no_fbc_reason.
 */
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (intel_vgpu_active(dev_priv)) {
		fbc->no_fbc_reason = "VGPU is active";
		return false;
	}

	if (!i915_modparams.enable_fbc) {
		fbc->no_fbc_reason = "disabled per module param or by default";
		return false;
	}

	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	return true;
}
803
b183b3f1
PZ
/*
 * Flatten the state cache into the register-programming parameter struct
 * consumed by the *_fbc_activate() functions.
 */
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
				     struct intel_fbc_reg_params *params)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* Since all our fields are integer types, use memset here so the
	 * comparison function can rely on memcmp because the padding will be
	 * zero. */
	memset(params, 0, sizeof(*params));

	params->vma = cache->vma;
	params->flags = cache->flags;

	params->crtc.pipe = crtc->pipe;
	params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
	params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);

	params->fb.format = cache->fb.format;
	params->fb.stride = cache->fb.stride;

	params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);

	/* Precompute the Display WA #0529 stride override (see gen7_fbc_activate). */
	if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
		params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
						32 * fbc->threshold) * 8;
}
832
/*
 * Called before a plane update on the FBC crtc: refresh the state cache,
 * flag the pending flip and deactivate FBC until post_update re-evaluates.
 */
void intel_fbc_pre_update(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	const char *reason = "update pending";

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
	fbc->flip_pending = true;

	intel_fbc_deactivate(dev_priv, reason);
unlock:
	mutex_unlock(&fbc->lock);
}
856
949f7c7d
ML
/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}
880
/*
 * Re-evaluate FBC after a plane update completed: clear the pending flip
 * and either (re)activate FBC or deactivate it with a recorded reason.
 * Caller must hold fbc->lock.
 */
static void __intel_fbc_post_update(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (!fbc->enabled || fbc->crtc != crtc)
		return;

	fbc->flip_pending = false;
	WARN_ON(fbc->active);

	/* Module param may have been flipped at runtime; honor it here. */
	if (!i915_modparams.enable_fbc) {
		intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
		__intel_fbc_disable(dev_priv);

		return;
	}

	intel_fbc_get_reg_params(crtc, &fbc->params);

	if (!intel_fbc_can_activate(crtc))
		return;

	if (!fbc->busy_bits) {
		intel_fbc_deactivate(dev_priv, "FBC enabled (active or scheduled)");
		intel_fbc_hw_activate(dev_priv);
	} else
		intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
912
1eb52238 913void intel_fbc_post_update(struct intel_crtc *crtc)
25ad93fd 914{
fac5e23e 915 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
ab34a7e8 916 struct intel_fbc *fbc = &dev_priv->fbc;
754d1133 917
9f218336 918 if (!fbc_supported(dev_priv))
0bf73c36
PZ
919 return;
920
ab34a7e8 921 mutex_lock(&fbc->lock);
1eb52238 922 __intel_fbc_post_update(crtc);
ab34a7e8 923 mutex_unlock(&fbc->lock);
7ff0ebcc
RV
924}
925
261fe99a
PZ
926static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
927{
928 if (fbc->enabled)
929 return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
930 else
931 return fbc->possible_framebuffer_bits;
932}
933
dbef0f15
PZ
934void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
935 unsigned int frontbuffer_bits,
936 enum fb_op_origin origin)
937{
ab34a7e8 938 struct intel_fbc *fbc = &dev_priv->fbc;
dbef0f15 939
9f218336 940 if (!fbc_supported(dev_priv))
0bf73c36
PZ
941 return;
942
0dd81544 943 if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
dbef0f15
PZ
944 return;
945
ab34a7e8 946 mutex_lock(&fbc->lock);
25ad93fd 947
261fe99a 948 fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
dbef0f15 949
5bc40472 950 if (fbc->enabled && fbc->busy_bits)
4a3d1e0f 951 intel_fbc_deactivate(dev_priv, "frontbuffer write");
25ad93fd 952
ab34a7e8 953 mutex_unlock(&fbc->lock);
dbef0f15
PZ
954}
955
/*
 * Frontbuffer flush hook: clear the flushed bits from fbc->busy_bits and,
 * once no writes are outstanding on the tracked plane, recompress (if FBC is
 * active) or retry activation (if it was deactivated and no flip is pending).
 */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	/* Clear busy bits for every origin, even GTT/flip. */
	fbc->busy_bits &= ~frontbuffer_bits;

	/* GTT/flip traffic never triggers recompression/re-activation here. */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else if (!fbc->flip_pending)
			/* Re-run activation; the pending flip path will do it otherwise. */
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}
982
/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct intel_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	bool crtc_chosen = false;
	int i;

	mutex_lock(&fbc->lock);

	/* Does this atomic commit involve the CRTC currently tied to FBC? */
	if (fbc->crtc &&
	    !intel_atomic_get_new_crtc_state(state, fbc->crtc))
		goto out;

	if (!intel_fbc_can_enable(dev_priv))
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);

		/* Skip planes the hardware can't feed into FBC. */
		if (!plane->has_fbc)
			continue;

		if (!plane_state->uapi.visible)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);

		crtc_state->enable_fbc = true;
		crtc_chosen = true;
		break;
	}

	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
	mutex_unlock(&fbc->lock);
}
1041
d029bcad
PZ
1042/**
1043 * intel_fbc_enable: tries to enable FBC on the CRTC
1044 * @crtc: the CRTC
62f90b38
DV
1045 * @crtc_state: corresponding &drm_crtc_state for @crtc
1046 * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
d029bcad 1047 *
f51be2e0 1048 * This function checks if the given CRTC was chosen for FBC, then enables it if
49227c4a
PZ
1049 * possible. Notice that it doesn't activate FBC. It is valid to call
1050 * intel_fbc_enable multiple times for the same pipe without an
1051 * intel_fbc_disable in the middle, as long as it is deactivated.
d029bcad 1052 */
faf68d92 1053void intel_fbc_enable(struct intel_crtc *crtc,
bee43ca4
VS
1054 const struct intel_crtc_state *crtc_state,
1055 const struct intel_plane_state *plane_state)
d029bcad 1056{
fac5e23e 1057 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
ab34a7e8 1058 struct intel_fbc *fbc = &dev_priv->fbc;
d029bcad
PZ
1059
1060 if (!fbc_supported(dev_priv))
1061 return;
1062
ab34a7e8 1063 mutex_lock(&fbc->lock);
d029bcad 1064
ab34a7e8 1065 if (fbc->enabled) {
49227c4a
PZ
1066 WARN_ON(fbc->crtc == NULL);
1067 if (fbc->crtc == crtc) {
faf68d92 1068 WARN_ON(!crtc_state->enable_fbc);
49227c4a
PZ
1069 WARN_ON(fbc->active);
1070 }
d029bcad
PZ
1071 goto out;
1072 }
1073
faf68d92 1074 if (!crtc_state->enable_fbc)
f51be2e0
PZ
1075 goto out;
1076
ab34a7e8
PZ
1077 WARN_ON(fbc->active);
1078 WARN_ON(fbc->crtc != NULL);
d029bcad 1079
faf68d92 1080 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
c5ecd469 1081 if (intel_fbc_alloc_cfb(crtc)) {
913a3a6a 1082 fbc->no_fbc_reason = "not enough stolen memory";
c5ecd469
PZ
1083 goto out;
1084 }
1085
d029bcad 1086 DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
ab34a7e8 1087 fbc->no_fbc_reason = "FBC enabled but not active yet\n";
d029bcad 1088
ab34a7e8
PZ
1089 fbc->enabled = true;
1090 fbc->crtc = crtc;
d029bcad 1091out:
ab34a7e8 1092 mutex_unlock(&fbc->lock);
d029bcad
PZ
1093}
1094
d029bcad 1095/**
c937ab3e 1096 * intel_fbc_disable - disable FBC if it's associated with crtc
d029bcad
PZ
1097 * @crtc: the CRTC
1098 *
1099 * This function disables FBC if it's associated with the provided CRTC.
1100 */
c937ab3e 1101void intel_fbc_disable(struct intel_crtc *crtc)
d029bcad 1102{
fac5e23e 1103 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
ab34a7e8 1104 struct intel_fbc *fbc = &dev_priv->fbc;
d029bcad
PZ
1105
1106 if (!fbc_supported(dev_priv))
1107 return;
1108
ab34a7e8 1109 mutex_lock(&fbc->lock);
4da45616 1110 if (fbc->crtc == crtc)
d029bcad 1111 __intel_fbc_disable(dev_priv);
ab34a7e8 1112 mutex_unlock(&fbc->lock);
d029bcad
PZ
1113}
1114
1115/**
c937ab3e 1116 * intel_fbc_global_disable - globally disable FBC
d029bcad
PZ
1117 * @dev_priv: i915 device instance
1118 *
1119 * This function disables FBC regardless of which CRTC is associated with it.
1120 */
c937ab3e 1121void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
d029bcad 1122{
ab34a7e8
PZ
1123 struct intel_fbc *fbc = &dev_priv->fbc;
1124
d029bcad
PZ
1125 if (!fbc_supported(dev_priv))
1126 return;
1127
ab34a7e8 1128 mutex_lock(&fbc->lock);
949f7c7d
ML
1129 if (fbc->enabled) {
1130 WARN_ON(fbc->crtc->active);
d029bcad 1131 __intel_fbc_disable(dev_priv);
949f7c7d 1132 }
ab34a7e8 1133 mutex_unlock(&fbc->lock);
d029bcad
PZ
1134}
1135
/*
 * Deferred work scheduled by intel_fbc_handle_fifo_underrun_irq(): takes the
 * FBC lock (not possible in IRQ context) and permanently deactivates FBC
 * after a FIFO underrun. underrun_detected stays set until explicitly reset
 * via intel_fbc_reset_underrun().
 */
static void intel_fbc_underrun_work_fn(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, fbc.underrun_work);
	struct intel_fbc *fbc = &dev_priv->fbc;

	mutex_lock(&fbc->lock);

	/* Maybe we were scheduled twice. */
	if (fbc->underrun_detected || !fbc->enabled)
		goto out;

	DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n");
	fbc->underrun_detected = true;

	intel_fbc_deactivate(dev_priv, "FIFO underrun");
out:
	mutex_unlock(&fbc->lock);
}
1155
d52ad9cb
ML
1156/*
1157 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
1158 * @dev_priv: i915 device instance
1159 *
1160 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
1161 * want to re-enable FBC after an underrun to increase test coverage.
1162 */
1163int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
1164{
1165 int ret;
1166
1167 cancel_work_sync(&dev_priv->fbc.underrun_work);
1168
1169 ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
1170 if (ret)
1171 return ret;
1172
1173 if (dev_priv->fbc.underrun_detected) {
1174 DRM_DEBUG_KMS("Re-allowing FBC after fifo underrun\n");
1175 dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
1176 }
1177
1178 dev_priv->fbc.underrun_detected = false;
1179 mutex_unlock(&dev_priv->fbc.lock);
1180
1181 return 0;
1182}
1183
/**
 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
 * @dev_priv: i915 device instance
 *
 * Without FBC, most underruns are harmless and don't really cause too many
 * problems, except for an annoying message on dmesg. With FBC, underruns can
 * become black screens or even worse, especially when paired with bad
 * watermarks. So in order for us to be on the safe side, completely disable FBC
 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
 * already suggests that watermarks may be bad, so try to be as safe as
 * possible.
 *
 * This function is called from the IRQ handler.
 */
void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	/* There's no guarantee that underrun_detected won't be set to true
	 * right after this check and before the work is scheduled, but that's
	 * not a problem since we'll check it again under the work function
	 * while FBC is locked. This check here is just to prevent us from
	 * unnecessarily scheduling the work, and it relies on the fact that we
	 * never switch underrun_detect back to false after it's true. */
	if (READ_ONCE(fbc->underrun_detected))
		return;

	/* Defer to process context; we can't take fbc->lock from an IRQ. */
	schedule_work(&fbc->underrun_work);
}
1216
80788a0f
PZ
1217/*
1218 * The DDX driver changes its behavior depending on the value it reads from
1219 * i915.enable_fbc, so sanitize it by translating the default value into either
1220 * 0 or 1 in order to allow it to know what's going on.
1221 *
1222 * Notice that this is done at driver initialization and we still allow user
1223 * space to change the value during runtime without sanitizing it again. IGT
1224 * relies on being able to change i915.enable_fbc at runtime.
1225 */
1226static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
1227{
4f044a88
MW
1228 if (i915_modparams.enable_fbc >= 0)
1229 return !!i915_modparams.enable_fbc;
80788a0f 1230
36dbc4d7
CW
1231 if (!HAS_FBC(dev_priv))
1232 return 0;
1233
1d25724b 1234 /* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
cd8c021b 1235 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
1d25724b
DD
1236 return 0;
1237
fd7d6c5c 1238 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
80788a0f
PZ
1239 return 1;
1240
1241 return 0;
1242}
1243
36dbc4d7
CW
1244static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
1245{
36dbc4d7 1246 /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
80debff8 1247 if (intel_vtd_active() &&
36dbc4d7
CW
1248 (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
1249 DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1250 return true;
1251 }
36dbc4d7
CW
1252
1253 return false;
1254}
1255
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;

	/* FBC needs a CFB in stolen memory; no stolen means no FBC. */
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->display.has_fbc = false;

	/* Sanitize after the has_fbc overrides above so they're respected. */
	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
		      i915_modparams.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}