Commit | Line | Data |
---|---|---|
7ff0ebcc RV |
1 | /* |
2 | * Copyright © 2014 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
21 | * DEALINGS IN THE SOFTWARE. | |
22 | */ | |
23 | ||
94b83957 RV |
24 | /** |
25 | * DOC: Frame Buffer Compression (FBC) | |
26 | * | |
27 | * FBC tries to save memory bandwidth (and so power consumption) by | |
28 | * compressing the amount of memory used by the display. It is totally |
29 | * transparent to user space and completely handled in the kernel. | |
7ff0ebcc RV |
30 | * |
31 | * The benefits of FBC are mostly visible with solid backgrounds and | |
94b83957 RV |
32 | * variation-less patterns. It comes from keeping the memory footprint small |
33 | * and having fewer memory pages opened and accessed for refreshing the display. | |
7ff0ebcc | 34 | * |
94b83957 RV |
35 | * i915 is responsible for reserving stolen memory for FBC and configuring its |
36 | * offset on proper registers. The hardware takes care of all | |
37 | * compress/decompress. However there are many known cases where we have to | |
38 | * forcibly disable it to allow proper screen updates. | |
7ff0ebcc RV |
39 | */ |
40 | ||
94b83957 RV |
41 | #include "intel_drv.h" |
42 | #include "i915_drv.h" | |
43 | ||
/* Does this platform have FBC hardware at all? */
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
	return HAS_FBC(dev_priv);
}
48 | ||
/* On HSW and gen8+ the hardware can only compress output on pipe A. */
static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
{
	return IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8;
}
53 | ||
/* Before gen4 the hardware can only compress plane A. */
static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) < 4;
}
58 | ||
/* Gen2/3 cannot keep FBC enabled while more than one pipe is in use. */
static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) <= 3;
}
63 | ||
/*
 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
 * origin so the x and y offsets can actually fit the registers. As a
 * consequence, the fence doesn't really start exactly at the display plane
 * address we program because it starts at the real start of the buffer, so we
 * have to take this into consideration here.
 */
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
	/* Distance between the programmed plane origin and the real buffer start. */
	return crtc->base.y - crtc->adjusted_y;
}
76 | ||
/*
 * For SKL+, the plane source size used by the hardware is based on the value we
 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
 * we wrote to PIPESRC.
 *
 * Either @width or @height may be NULL when the caller needs only one
 * dimension.
 */
static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
					    int *width, int *height)
{
	if (width)
		*width = cache->plane.src_w;
	if (height)
		*height = cache->plane.src_h;
}
90 | ||
aaf78d27 PZ |
91 | static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv, |
92 | struct intel_fbc_state_cache *cache) | |
c5ecd469 | 93 | { |
c5ecd469 PZ |
94 | int lines; |
95 | ||
aaf78d27 | 96 | intel_fbc_get_plane_source_size(cache, NULL, &lines); |
79f2624b | 97 | if (INTEL_GEN(dev_priv) == 7) |
c5ecd469 | 98 | lines = min(lines, 2048); |
79f2624b PZ |
99 | else if (INTEL_GEN(dev_priv) >= 8) |
100 | lines = min(lines, 2560); | |
c5ecd469 PZ |
101 | |
102 | /* Hardware needs the full buffer stride, not just the active area. */ | |
aaf78d27 | 103 | return lines * cache->fb.stride; |
c5ecd469 PZ |
104 | } |
105 | ||
/* Turn off FBC on gen2-4 and wait for any in-flight compression to finish. */
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (intel_wait_for_register(dev_priv,
				    FBC_STATUS, FBC_STAT_COMPRESSING, 0,
				    10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}
}
126 | ||
/*
 * Program and enable FBC on gen2-4: set up the CFB pitch, clear stale
 * line-length tags, configure the CPU fence (gen4) and finally flip the
 * enable bit in FBC_CONTROL.
 */
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	/* Note: fbc.threshold == 1 for i8xx */
	cfb_pitch = params->cfb_size / FBC_LL_SIZE;
	/* Never program a pitch larger than the framebuffer's own stride. */
	if (params->fb.stride < cfb_pitch)
		cfb_pitch = params->fb.stride;

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev_priv))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG(i), 0);

	if (IS_GEN4(dev_priv)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	/* Preserve only the interval field of the old control value. */
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev_priv))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= params->vma->fence->id;
	I915_WRITE(FBC_CONTROL, fbc_ctl);
}
169 | ||
/* Read back the gen2-4 enable bit to see whether FBC is currently on. */
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
174 | ||
/*
 * Program and enable FBC on G4X/GM45: select the plane, pick the
 * compression limit from the framebuffer's bytes-per-pixel, set up the
 * optional CPU fence and enable DPFC.
 */
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
	/* 16bpp framebuffers need the stronger 2:1 compression limit. */
	if (params->fb.format->cpp[0] == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;

	if (params->vma->fence) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
		I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	} else {
		I915_WRITE(DPFC_FENCE_YOFF, 0);
	}

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}
196 | ||
0e631adc | 197 | static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 198 | { |
7ff0ebcc RV |
199 | u32 dpfc_ctl; |
200 | ||
7ff0ebcc RV |
201 | /* Disable compression */ |
202 | dpfc_ctl = I915_READ(DPFC_CONTROL); | |
203 | if (dpfc_ctl & DPFC_CTL_EN) { | |
204 | dpfc_ctl &= ~DPFC_CTL_EN; | |
205 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | |
7ff0ebcc RV |
206 | } |
207 | } | |
208 | ||
/* Read back the G4X/GM45 enable bit to see whether FBC is currently on. */
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
213 | ||
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
	I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
	/* Posting read flushes the write to the hardware before returning. */
	POSTING_READ(MSG_FBC_REND_STATE);
}
220 | ||
/*
 * Program and enable FBC on ILK/SNB: build the DPFC control value
 * (plane, compression limit from the current threshold, fence bits that
 * differ between gen5 and gen6), program the render-target base and
 * enable compression, then force a recompression.
 */
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
	u32 dpfc_ctl;
	int threshold = dev_priv->fbc.threshold;

	dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
	/* 16bpp needs one extra compression-limit step. */
	if (params->fb.format->cpp[0] == 2)
		threshold++;

	switch (threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	if (params->vma->fence) {
		dpfc_ctl |= DPFC_CTL_FENCE_EN;
		/* Gen5 carries the fence id in the control register itself. */
		if (IS_GEN5(dev_priv))
			dpfc_ctl |= params->vma->fence->id;
		/* Gen6 programs the fence through dedicated registers. */
		if (IS_GEN6(dev_priv)) {
			I915_WRITE(SNB_DPFC_CTL_SA,
				   SNB_CPU_FENCE_ENABLE |
				   params->vma->fence->id);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET,
				   params->crtc.fence_y_offset);
		}
	} else {
		if (IS_GEN6(dev_priv)) {
			I915_WRITE(SNB_DPFC_CTL_SA, 0);
			I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
		}
	}

	I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
	I915_WRITE(ILK_FBC_RT_BASE,
		   i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	intel_fbc_recompress(dev_priv);
}
270 | ||
0e631adc | 271 | static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 272 | { |
7ff0ebcc RV |
273 | u32 dpfc_ctl; |
274 | ||
7ff0ebcc RV |
275 | /* Disable compression */ |
276 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | |
277 | if (dpfc_ctl & DPFC_CTL_EN) { | |
278 | dpfc_ctl &= ~DPFC_CTL_EN; | |
279 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | |
7ff0ebcc RV |
280 | } |
281 | } | |
282 | ||
/* Read back the ILK+ enable bit to see whether FBC is currently on. */
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
287 | ||
b183b3f1 | 288 | static void gen7_fbc_activate(struct drm_i915_private *dev_priv) |
7ff0ebcc | 289 | { |
b183b3f1 | 290 | struct intel_fbc_reg_params *params = &dev_priv->fbc.params; |
7ff0ebcc | 291 | u32 dpfc_ctl; |
ce65e47b | 292 | int threshold = dev_priv->fbc.threshold; |
7ff0ebcc | 293 | |
d8514d63 | 294 | dpfc_ctl = 0; |
7733b49b | 295 | if (IS_IVYBRIDGE(dev_priv)) |
b183b3f1 | 296 | dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane); |
d8514d63 | 297 | |
801c8fe8 | 298 | if (params->fb.format->cpp[0] == 2) |
ce65e47b | 299 | threshold++; |
7ff0ebcc | 300 | |
ce65e47b | 301 | switch (threshold) { |
7ff0ebcc RV |
302 | case 4: |
303 | case 3: | |
304 | dpfc_ctl |= DPFC_CTL_LIMIT_4X; | |
305 | break; | |
306 | case 2: | |
307 | dpfc_ctl |= DPFC_CTL_LIMIT_2X; | |
308 | break; | |
309 | case 1: | |
310 | dpfc_ctl |= DPFC_CTL_LIMIT_1X; | |
311 | break; | |
312 | } | |
313 | ||
be1e3415 | 314 | if (params->vma->fence) { |
12ecf4b9 CW |
315 | dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; |
316 | I915_WRITE(SNB_DPFC_CTL_SA, | |
be1e3415 CW |
317 | SNB_CPU_FENCE_ENABLE | |
318 | params->vma->fence->id); | |
12ecf4b9 CW |
319 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset); |
320 | } else { | |
321 | I915_WRITE(SNB_DPFC_CTL_SA,0); | |
322 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0); | |
323 | } | |
7ff0ebcc RV |
324 | |
325 | if (dev_priv->fbc.false_color) | |
326 | dpfc_ctl |= FBC_CTL_FALSE_COLOR; | |
327 | ||
7733b49b | 328 | if (IS_IVYBRIDGE(dev_priv)) { |
7ff0ebcc RV |
329 | /* WaFbcAsynchFlipDisableFbcQueue:ivb */ |
330 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |
331 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |
332 | ILK_FBCQ_DIS); | |
40f4022e | 333 | } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { |
7ff0ebcc | 334 | /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ |
b183b3f1 PZ |
335 | I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe), |
336 | I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) | | |
7ff0ebcc RV |
337 | HSW_FBCQ_DIS); |
338 | } | |
339 | ||
57012be9 PZ |
340 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
341 | ||
d5ce4164 | 342 | intel_fbc_recompress(dev_priv); |
7ff0ebcc RV |
343 | } |
344 | ||
8c40074c PZ |
345 | static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) |
346 | { | |
5697d60f | 347 | if (INTEL_GEN(dev_priv) >= 5) |
8c40074c PZ |
348 | return ilk_fbc_is_active(dev_priv); |
349 | else if (IS_GM45(dev_priv)) | |
350 | return g4x_fbc_is_active(dev_priv); | |
351 | else | |
352 | return i8xx_fbc_is_active(dev_priv); | |
353 | } | |
354 | ||
/* Mark FBC active in software state and dispatch to the per-gen enable path. */
static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = true;

	if (INTEL_GEN(dev_priv) >= 7)
		gen7_fbc_activate(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_activate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_activate(dev_priv);
	else
		i8xx_fbc_activate(dev_priv);
}
370 | ||
/* Mark FBC inactive in software state and dispatch to the per-gen disable path. */
static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	fbc->active = false;

	if (INTEL_GEN(dev_priv) >= 5)
		ilk_fbc_deactivate(dev_priv);
	else if (IS_GM45(dev_priv))
		g4x_fbc_deactivate(dev_priv);
	else
		i8xx_fbc_deactivate(dev_priv);
}
384 | ||
/**
 * intel_fbc_is_active - Is FBC active?
 * @dev_priv: i915 device instance
 *
 * This function is used to verify the current state of FBC.
 *
 * FIXME: This should be tracked in the plane config eventually
 * instead of queried at runtime for most callers.
 */
bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
{
	/* Software-tracked state, not a register read. */
	return dev_priv->fbc.active;
}
398 | ||
/*
 * Deferred-activation worker: waits for a vblank to pass after the FBC
 * activation was scheduled, then programs the hardware. May be cancelled
 * (work->scheduled cleared) or re-scheduled while sleeping, both of which
 * are handled below.
 */
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct drm_i915_private *dev_priv =
		container_of(__work, struct drm_i915_private, fbc.work.work);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;
	struct intel_crtc *crtc = fbc->crtc;
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];

	if (drm_crtc_vblank_get(&crtc->base)) {
		DRM_ERROR("vblank not available for FBC on pipe %c\n",
			  pipe_name(crtc->pipe));

		/* Can't wait for a vblank; drop the scheduled activation. */
		mutex_lock(&fbc->lock);
		work->scheduled = false;
		mutex_unlock(&fbc->lock);
		return;
	}

retry:
	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 *
	 * It is also worth mentioning that since work->scheduled_vblank can be
	 * updated multiple times by the other threads, hitting the timeout is
	 * not an error condition. We'll just end up hitting the "goto retry"
	 * case below.
	 */
	wait_event_timeout(vblank->queue,
			   drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
			   msecs_to_jiffies(50));

	mutex_lock(&fbc->lock);

	/* Were we cancelled? */
	if (!work->scheduled)
		goto out;

	/* Were we delayed again while this function was sleeping? */
	if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
		mutex_unlock(&fbc->lock);
		goto retry;
	}

	intel_fbc_hw_activate(dev_priv);

	work->scheduled = false;

out:
	mutex_unlock(&fbc->lock);
	drm_crtc_vblank_put(&crtc->base);
}
456 | ||
/*
 * Schedule the deferred FBC activation for @crtc. Caller must hold
 * fbc->lock. Records the current vblank count so the worker can wait for
 * the next vblank before touching the hardware.
 */
static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_work *work = &fbc->work;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	if (drm_crtc_vblank_get(&crtc->base)) {
		DRM_ERROR("vblank not available for FBC on pipe %c\n",
			  pipe_name(crtc->pipe));
		return;
	}

	/* It is useless to call intel_fbc_cancel_work() or cancel_work() in
	 * this function since we're not releasing fbc.lock, so it won't have an
	 * opportunity to grab it to discover that it was cancelled. So we just
	 * update the expected jiffy count. */
	work->scheduled = true;
	work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
	drm_crtc_vblank_put(&crtc->base);

	schedule_work(&work->work);
}
481 | ||
/*
 * Deactivate FBC and cancel any pending deferred activation. Caller must
 * hold fbc->lock.
 */
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	WARN_ON(!mutex_is_locked(&fbc->lock));

	/* Calling cancel_work() here won't help due to the fact that the work
	 * function grabs fbc->lock. Just set scheduled to false so the work
	 * function can know it was cancelled. */
	fbc->work.scheduled = false;

	if (fbc->active)
		intel_fbc_hw_deactivate(dev_priv);
}
496 | ||
faf68d92 ML |
497 | static bool multiple_pipes_ok(struct intel_crtc *crtc, |
498 | struct intel_plane_state *plane_state) | |
232fd934 | 499 | { |
faf68d92 | 500 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
010cf73d PZ |
501 | struct intel_fbc *fbc = &dev_priv->fbc; |
502 | enum pipe pipe = crtc->pipe; | |
232fd934 | 503 | |
010cf73d PZ |
504 | /* Don't even bother tracking anything we don't need. */ |
505 | if (!no_fbc_on_multiple_pipes(dev_priv)) | |
232fd934 PZ |
506 | return true; |
507 | ||
936e71e3 | 508 | if (plane_state->base.visible) |
010cf73d PZ |
509 | fbc->visible_pipes_mask |= (1 << pipe); |
510 | else | |
511 | fbc->visible_pipes_mask &= ~(1 << pipe); | |
232fd934 | 512 | |
010cf73d | 513 | return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0; |
232fd934 PZ |
514 | } |
515 | ||
/*
 * Allocate a stolen-memory node for the CFB, progressively halving the
 * requested size (and doubling the compression threshold) until the
 * allocation fits. Returns the threshold that worked (1 = full size), or
 * 0 when no acceptable allocation could be made.
 */
static int find_compression_threshold(struct drm_i915_private *dev_priv,
				      struct drm_mm_node *node,
				      int size,
				      int fb_cpp)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int compression_threshold = 1;
	int ret;
	u64 end;

	/* The FBC hardware for BDW/SKL doesn't have access to the stolen
	 * reserved range size, so it always assumes the maximum (8mb) is used.
	 * If we enable FBC using a CFB on that memory range we'll get FIFO
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
		end = ggtt->stolen_size - 8 * 1024 * 1024;
	else
		end = U64_MAX;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.
	 *
	 * The enable_fbc code will attempt to use one of our 2 compression
	 * thresholds, therefore, in that case, we only have 1 resort.
	 */

	/* Try to over-allocate to reduce reallocations and fragmentation. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
						   4096, 0, end);
	if (ret == 0)
		return compression_threshold;

again:
	/* HW's ability to limit the CFB is 1:4 */
	if (compression_threshold > 4 ||
	    (fb_cpp == 2 && compression_threshold == 2))
		return 0;

	/* Halve the request each retry; note size is updated in-place. */
	ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
						   4096, 0, end);
	if (ret && INTEL_GEN(dev_priv) <= 4) {
		return 0;
	} else if (ret) {
		compression_threshold <<= 1;
		goto again;
	} else {
		return compression_threshold;
	}
}
565 | ||
/*
 * Allocate the compressed framebuffer (CFB) from stolen memory and, on
 * pre-GM45 hardware, the separate line-length buffer. Programs the CFB
 * base registers. Returns 0 on success or -ENOSPC when stolen memory
 * cannot accommodate the buffers.
 */
static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_mm_node *uninitialized_var(compressed_llb);
	int size, fb_cpp, ret;

	WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));

	size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
	fb_cpp = fbc->state_cache.fb.format->cpp[0];

	ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
					 size, fb_cpp);
	if (!ret)
		goto err_llb;
	else if (ret > 1) {
		DRM_INFO("Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");

	}

	fbc->threshold = ret;

	if (INTEL_GEN(dev_priv) >= 5)
		I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
	else if (IS_GM45(dev_priv)) {
		I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
	} else {
		/* Pre-GM45 additionally needs a line-length buffer in stolen. */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = i915_gem_stolen_insert_node(dev_priv, compressed_llb,
						  4096, 4096);
		if (ret)
			goto err_fb;

		fbc->compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + fbc->compressed_fb.start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
		      fbc->compressed_fb.size, fbc->threshold);

	return 0;

err_fb:
	/* kfree(NULL) is a no-op, so this is safe when kzalloc failed. */
	kfree(compressed_llb);
	i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
	if (drm_mm_initialized(&dev_priv->mm.stolen))
		pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
624 | ||
7733b49b | 625 | static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) |
fc786728 | 626 | { |
ab34a7e8 PZ |
627 | struct intel_fbc *fbc = &dev_priv->fbc; |
628 | ||
629 | if (drm_mm_node_allocated(&fbc->compressed_fb)) | |
630 | i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); | |
631 | ||
632 | if (fbc->compressed_llb) { | |
633 | i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb); | |
634 | kfree(fbc->compressed_llb); | |
fc786728 | 635 | } |
fc786728 PZ |
636 | } |
637 | ||
/* Public entry point: take fbc->lock and release the CFB allocations. */
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);
	__intel_fbc_cleanup_cfb(dev_priv);
	mutex_unlock(&fbc->lock);
}
649 | ||
adf70c65 PZ |
650 | static bool stride_is_valid(struct drm_i915_private *dev_priv, |
651 | unsigned int stride) | |
652 | { | |
653 | /* These should have been caught earlier. */ | |
654 | WARN_ON(stride < 512); | |
655 | WARN_ON((stride & (64 - 1)) != 0); | |
656 | ||
657 | /* Below are the additional FBC restrictions. */ | |
658 | ||
659 | if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv)) | |
660 | return stride == 4096 || stride == 8192; | |
661 | ||
662 | if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048) | |
663 | return false; | |
664 | ||
665 | if (stride > 16384) | |
666 | return false; | |
667 | ||
668 | return true; | |
669 | } | |
670 | ||
aaf78d27 PZ |
671 | static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, |
672 | uint32_t pixel_format) | |
b9e831dc | 673 | { |
aaf78d27 | 674 | switch (pixel_format) { |
b9e831dc PZ |
675 | case DRM_FORMAT_XRGB8888: |
676 | case DRM_FORMAT_XBGR8888: | |
677 | return true; | |
678 | case DRM_FORMAT_XRGB1555: | |
679 | case DRM_FORMAT_RGB565: | |
680 | /* 16bpp not supported on gen2 */ | |
aaf78d27 | 681 | if (IS_GEN2(dev_priv)) |
b9e831dc PZ |
682 | return false; |
683 | /* WaFbcOnly1to1Ratio:ctg */ | |
684 | if (IS_G4X(dev_priv)) | |
685 | return false; | |
686 | return true; | |
687 | default: | |
688 | return false; | |
689 | } | |
690 | } | |
691 | ||
/*
 * For some reason, the hardware tracking starts looking at whatever we
 * programmed as the display plane base address register. It does not look at
 * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
 * variables instead of just looking at the pipe/plane size.
 */
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	unsigned int effective_w, effective_h, max_w, max_h;

	/* Per-generation limits on the trackable plane dimensions. */
	if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
		max_w = 4096;
		max_h = 4096;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		max_w = 4096;
		max_h = 2048;
	} else {
		max_w = 2048;
		max_h = 1536;
	}

	/* NOTE(review): intel_fbc_get_plane_source_size() takes int *, but
	 * effective_w/h are unsigned int — confirm this builds without a
	 * pointer-type warning. */
	intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
					&effective_h);
	effective_w += crtc->adjusted_x;
	effective_h += crtc->adjusted_y;

	return effective_w <= max_w && effective_h <= max_h;
}
722 | ||
/*
 * Snapshot the CRTC/plane/framebuffer state that FBC decisions depend on
 * into fbc->state_cache, so later checks don't have to chase live atomic
 * state. A NULL cache->vma marks the plane as unusable for FBC.
 */
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
					 struct intel_crtc_state *crtc_state,
					 struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;
	struct drm_framebuffer *fb = plane_state->base.fb;

	cache->vma = NULL;

	cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;

	cache->plane.rotation = plane_state->base.rotation;
	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
	cache->plane.src_h = drm_rect_height(&plane_state->base.src) >> 16;
	cache->plane.visible = plane_state->base.visible;

	/* Invisible planes keep vma == NULL; nothing more to cache. */
	if (!cache->plane.visible)
		return;

	cache->fb.format = fb->format;
	cache->fb.stride = fb->pitches[0];

	cache->vma = plane_state->vma;
}
756 | ||
/*
 * intel_fbc_can_activate - check whether FBC can be activated on @crtc now
 *
 * Runs the cached plane/CRTC state through every hardware and software
 * restriction. On the first failing check it records a human-readable
 * reason in fbc->no_fbc_reason and returns false, so the order of the
 * checks determines which reason gets reported.
 */
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_fbc_state_cache *cache = &fbc->state_cache;

	/* We don't need to use a state cache here since this information is
	 * global for all CRTC.
	 */
	if (fbc->underrun_detected) {
		fbc->no_fbc_reason = "underrun detected";
		return false;
	}

	/* cache->vma is only set while the primary plane is visible. */
	if (!cache->vma) {
		fbc->no_fbc_reason = "primary plane not visible";
		return false;
	}

	if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
	    (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
		fbc->no_fbc_reason = "incompatible mode";
		return false;
	}

	if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
		fbc->no_fbc_reason = "mode too large for compression";
		return false;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 *
	 * Note that is possible for a tiled surface to be unmappable (and
	 * so have no fence associated with it) due to aperture constaints
	 * at the time of pinning.
	 */
	if (!cache->vma->fence) {
		fbc->no_fbc_reason = "framebuffer not tiled or fenced";
		return false;
	}

	/* Pre-gen5 (except G4X) hardware can't compress rotated scanout. */
	if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
	    cache->plane.rotation != DRM_MODE_ROTATE_0) {
		fbc->no_fbc_reason = "rotation unsupported";
		return false;
	}

	if (!stride_is_valid(dev_priv, cache->fb.stride)) {
		fbc->no_fbc_reason = "framebuffer stride not supported";
		return false;
	}

	if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
		fbc->no_fbc_reason = "pixel format is invalid";
		return false;
	}

	/* WaFbcExceedCdClockThreshold:hsw,bdw */
	if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
	    cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
		fbc->no_fbc_reason = "pixel rate is too big";
		return false;
	}

	/* It is possible for the required CFB size change without a
	 * crtc->disable + crtc->enable since it is possible to change the
	 * stride without triggering a full modeset. Since we try to
	 * over-allocate the CFB, there's a chance we may keep FBC enabled even
	 * if this happens, but if we exceed the current CFB size we'll have to
	 * disable FBC. Notice that it would be possible to disable FBC, wait
	 * for a frame, free the stolen node, then try to reenable FBC in case
	 * we didn't get any invalidate/deactivate calls, but this would require
	 * a lot of tracking just for a specific case. If we conclude it's an
	 * important case, we can implement it later. */
	if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
	    fbc->compressed_fb.size * fbc->threshold) {
		fbc->no_fbc_reason = "CFB requirements changed";
		return false;
	}

	return true;
}
839 | ||
ee2be309 | 840 | static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) |
44a8a257 | 841 | { |
913a3a6a | 842 | struct intel_fbc *fbc = &dev_priv->fbc; |
44a8a257 | 843 | |
c033666a | 844 | if (intel_vgpu_active(dev_priv)) { |
913a3a6a | 845 | fbc->no_fbc_reason = "VGPU is active"; |
44a8a257 PZ |
846 | return false; |
847 | } | |
848 | ||
44a8a257 | 849 | if (!i915.enable_fbc) { |
80788a0f | 850 | fbc->no_fbc_reason = "disabled per module param or by default"; |
44a8a257 PZ |
851 | return false; |
852 | } | |
853 | ||
61a585d6 PZ |
854 | if (fbc->underrun_detected) { |
855 | fbc->no_fbc_reason = "underrun detected"; | |
856 | return false; | |
857 | } | |
858 | ||
ee2be309 PZ |
859 | return true; |
860 | } | |
861 | ||
b183b3f1 PZ |
862 | static void intel_fbc_get_reg_params(struct intel_crtc *crtc, |
863 | struct intel_fbc_reg_params *params) | |
864 | { | |
fac5e23e | 865 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
aaf78d27 PZ |
866 | struct intel_fbc *fbc = &dev_priv->fbc; |
867 | struct intel_fbc_state_cache *cache = &fbc->state_cache; | |
b183b3f1 PZ |
868 | |
869 | /* Since all our fields are integer types, use memset here so the | |
870 | * comparison function can rely on memcmp because the padding will be | |
871 | * zero. */ | |
872 | memset(params, 0, sizeof(*params)); | |
873 | ||
be1e3415 CW |
874 | params->vma = cache->vma; |
875 | ||
b183b3f1 PZ |
876 | params->crtc.pipe = crtc->pipe; |
877 | params->crtc.plane = crtc->plane; | |
878 | params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); | |
879 | ||
801c8fe8 | 880 | params->fb.format = cache->fb.format; |
aaf78d27 | 881 | params->fb.stride = cache->fb.stride; |
b183b3f1 | 882 | |
aaf78d27 | 883 | params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); |
b183b3f1 PZ |
884 | } |
885 | ||
886 | static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1, | |
887 | struct intel_fbc_reg_params *params2) | |
888 | { | |
889 | /* We can use this since intel_fbc_get_reg_params() does a memset. */ | |
890 | return memcmp(params1, params2, sizeof(*params1)) == 0; | |
891 | } | |
892 | ||
faf68d92 ML |
/*
 * intel_fbc_pre_update - deactivate FBC and refresh its state cache before
 * a plane/CRTC update commits.
 *
 * Called with the new (not yet committed) crtc_state/plane_state. FBC is
 * deactivated here and (maybe) re-activated later by intel_fbc_post_update().
 */
void intel_fbc_pre_update(struct intel_crtc *crtc,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	/* Multiple-pipe restriction: deactivate without touching the cache. */
	if (!multiple_pipes_ok(crtc, plane_state)) {
		fbc->no_fbc_reason = "more than one pipe active";
		goto deactivate;
	}

	/* Nothing to do unless FBC is currently tied to this CRTC. */
	if (!fbc->enabled || fbc->crtc != crtc)
		goto unlock;

	intel_fbc_update_state_cache(crtc, crtc_state, plane_state);

deactivate:
	intel_fbc_deactivate(dev_priv);
unlock:
	mutex_unlock(&fbc->lock);
}
920 | ||
1eb52238 | 921 | static void __intel_fbc_post_update(struct intel_crtc *crtc) |
212890cf | 922 | { |
fac5e23e | 923 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
212890cf PZ |
924 | struct intel_fbc *fbc = &dev_priv->fbc; |
925 | struct intel_fbc_reg_params old_params; | |
926 | ||
927 | WARN_ON(!mutex_is_locked(&fbc->lock)); | |
928 | ||
929 | if (!fbc->enabled || fbc->crtc != crtc) | |
930 | return; | |
931 | ||
932 | if (!intel_fbc_can_activate(crtc)) { | |
933 | WARN_ON(fbc->active); | |
934 | return; | |
935 | } | |
615b40d7 | 936 | |
ab34a7e8 PZ |
937 | old_params = fbc->params; |
938 | intel_fbc_get_reg_params(crtc, &fbc->params); | |
b183b3f1 | 939 | |
7ff0ebcc RV |
940 | /* If the scanout has not changed, don't modify the FBC settings. |
941 | * Note that we make the fundamental assumption that the fb->obj | |
942 | * cannot be unpinned (and have its GTT offset and fence revoked) | |
943 | * without first being decoupled from the scanout and FBC disabled. | |
944 | */ | |
ab34a7e8 PZ |
945 | if (fbc->active && |
946 | intel_fbc_reg_params_equal(&old_params, &fbc->params)) | |
7ff0ebcc RV |
947 | return; |
948 | ||
60eb2cc7 | 949 | intel_fbc_deactivate(dev_priv); |
0e631adc | 950 | intel_fbc_schedule_activation(crtc); |
212890cf | 951 | fbc->no_fbc_reason = "FBC enabled (active or scheduled)"; |
25ad93fd PZ |
952 | } |
953 | ||
1eb52238 | 954 | void intel_fbc_post_update(struct intel_crtc *crtc) |
25ad93fd | 955 | { |
fac5e23e | 956 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
ab34a7e8 | 957 | struct intel_fbc *fbc = &dev_priv->fbc; |
754d1133 | 958 | |
9f218336 | 959 | if (!fbc_supported(dev_priv)) |
0bf73c36 PZ |
960 | return; |
961 | ||
ab34a7e8 | 962 | mutex_lock(&fbc->lock); |
1eb52238 | 963 | __intel_fbc_post_update(crtc); |
ab34a7e8 | 964 | mutex_unlock(&fbc->lock); |
7ff0ebcc RV |
965 | } |
966 | ||
261fe99a PZ |
967 | static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) |
968 | { | |
969 | if (fbc->enabled) | |
970 | return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit; | |
971 | else | |
972 | return fbc->possible_framebuffer_bits; | |
973 | } | |
974 | ||
dbef0f15 PZ |
/*
 * intel_fbc_invalidate - frontbuffer invalidate hook
 *
 * Records which tracked frontbuffers became dirty and deactivates FBC
 * while any of them are busy. GTT and flip origins are ignored because
 * the hardware tracks those writes itself.
 */
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits,
			  enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		return;

	mutex_lock(&fbc->lock);

	/* Only remember the bits FBC actually cares about. */
	fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;

	if (fbc->enabled && fbc->busy_bits)
		intel_fbc_deactivate(dev_priv);

	mutex_unlock(&fbc->lock);
}
996 | ||
/*
 * intel_fbc_flush - frontbuffer flush hook
 *
 * Clears the busy bits for the flushed frontbuffers and, when the tracked
 * frontbuffer is flushed and nothing is busy anymore, either triggers a
 * hardware recompress (FBC active) or retries activation (FBC inactive).
 */
void intel_fbc_flush(struct drm_i915_private *dev_priv,
		     unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_fbc *fbc = &dev_priv->fbc;

	if (!fbc_supported(dev_priv))
		return;

	mutex_lock(&fbc->lock);

	fbc->busy_bits &= ~frontbuffer_bits;

	/* Hardware tracks GTT/flip writes; no software action needed. */
	if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
		goto out;

	if (!fbc->busy_bits && fbc->enabled &&
	    (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
		if (fbc->active)
			intel_fbc_recompress(dev_priv);
		else
			__intel_fbc_post_update(fbc->crtc);
	}

out:
	mutex_unlock(&fbc->lock);
}
1023 | ||
f51be2e0 PZ |
/**
 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
 * @dev_priv: i915 device instance
 * @state: the atomic state structure
 *
 * This function looks at the proposed state for CRTCs and planes, then chooses
 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
 * true.
 *
 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
 */
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
			   struct drm_atomic_state *state)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	bool crtc_chosen = false;
	int i;

	mutex_lock(&fbc->lock);

	/* Does this atomic commit involve the CRTC currently tied to FBC?
	 * If not, leave the current assignment alone. */
	if (fbc->crtc &&
	    !drm_atomic_get_existing_crtc_state(state, &fbc->crtc->base))
		goto out;

	if (!intel_fbc_can_enable(dev_priv))
		goto out;

	/* Simply choose the first CRTC that is compatible and has a visible
	 * plane. We could go for fancier schemes such as checking the plane
	 * size, but this would just affect the few platforms that don't tie FBC
	 * to pipe or plane A. */
	for_each_new_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *intel_plane_state =
			to_intel_plane_state(plane_state);
		struct intel_crtc_state *intel_crtc_state;
		struct intel_crtc *crtc = to_intel_crtc(plane_state->crtc);

		if (!intel_plane_state->base.visible)
			continue;

		if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
			continue;

		if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
			continue;

		intel_crtc_state = to_intel_crtc_state(
			drm_atomic_get_existing_crtc_state(state, &crtc->base));

		intel_crtc_state->enable_fbc = true;
		crtc_chosen = true;
		break;
	}

	if (!crtc_chosen)
		fbc->no_fbc_reason = "no suitable CRTC for FBC";

out:
	mutex_unlock(&fbc->lock);
}
1088 | ||
d029bcad PZ |
1089 | /** |
1090 | * intel_fbc_enable: tries to enable FBC on the CRTC | |
1091 | * @crtc: the CRTC | |
62f90b38 DV |
1092 | * @crtc_state: corresponding &drm_crtc_state for @crtc |
1093 | * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc | |
d029bcad | 1094 | * |
f51be2e0 | 1095 | * This function checks if the given CRTC was chosen for FBC, then enables it if |
49227c4a PZ |
1096 | * possible. Notice that it doesn't activate FBC. It is valid to call |
1097 | * intel_fbc_enable multiple times for the same pipe without an | |
1098 | * intel_fbc_disable in the middle, as long as it is deactivated. | |
d029bcad | 1099 | */ |
faf68d92 ML |
1100 | void intel_fbc_enable(struct intel_crtc *crtc, |
1101 | struct intel_crtc_state *crtc_state, | |
1102 | struct intel_plane_state *plane_state) | |
d029bcad | 1103 | { |
fac5e23e | 1104 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
ab34a7e8 | 1105 | struct intel_fbc *fbc = &dev_priv->fbc; |
d029bcad PZ |
1106 | |
1107 | if (!fbc_supported(dev_priv)) | |
1108 | return; | |
1109 | ||
ab34a7e8 | 1110 | mutex_lock(&fbc->lock); |
d029bcad | 1111 | |
ab34a7e8 | 1112 | if (fbc->enabled) { |
49227c4a PZ |
1113 | WARN_ON(fbc->crtc == NULL); |
1114 | if (fbc->crtc == crtc) { | |
faf68d92 | 1115 | WARN_ON(!crtc_state->enable_fbc); |
49227c4a PZ |
1116 | WARN_ON(fbc->active); |
1117 | } | |
d029bcad PZ |
1118 | goto out; |
1119 | } | |
1120 | ||
faf68d92 | 1121 | if (!crtc_state->enable_fbc) |
f51be2e0 PZ |
1122 | goto out; |
1123 | ||
ab34a7e8 PZ |
1124 | WARN_ON(fbc->active); |
1125 | WARN_ON(fbc->crtc != NULL); | |
d029bcad | 1126 | |
faf68d92 | 1127 | intel_fbc_update_state_cache(crtc, crtc_state, plane_state); |
c5ecd469 | 1128 | if (intel_fbc_alloc_cfb(crtc)) { |
913a3a6a | 1129 | fbc->no_fbc_reason = "not enough stolen memory"; |
c5ecd469 PZ |
1130 | goto out; |
1131 | } | |
1132 | ||
d029bcad | 1133 | DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); |
ab34a7e8 | 1134 | fbc->no_fbc_reason = "FBC enabled but not active yet\n"; |
d029bcad | 1135 | |
ab34a7e8 PZ |
1136 | fbc->enabled = true; |
1137 | fbc->crtc = crtc; | |
d029bcad | 1138 | out: |
ab34a7e8 | 1139 | mutex_unlock(&fbc->lock); |
d029bcad PZ |
1140 | } |
1141 | ||
/**
 * __intel_fbc_disable - disable FBC
 * @dev_priv: i915 device instance
 *
 * This is the low level function that actually disables FBC. Callers should
 * grab the FBC lock.
 */
static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	struct intel_crtc *crtc = fbc->crtc;

	/* Sanity: must hold the lock, FBC must be enabled-but-inactive, and
	 * the CRTC must already be shut down. */
	WARN_ON(!mutex_is_locked(&fbc->lock));
	WARN_ON(!fbc->enabled);
	WARN_ON(fbc->active);
	WARN_ON(crtc->active);

	DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));

	__intel_fbc_cleanup_cfb(dev_priv);

	fbc->enabled = false;
	fbc->crtc = NULL;
}
1166 | ||
1167 | /** | |
c937ab3e | 1168 | * intel_fbc_disable - disable FBC if it's associated with crtc |
d029bcad PZ |
1169 | * @crtc: the CRTC |
1170 | * | |
1171 | * This function disables FBC if it's associated with the provided CRTC. | |
1172 | */ | |
c937ab3e | 1173 | void intel_fbc_disable(struct intel_crtc *crtc) |
d029bcad | 1174 | { |
fac5e23e | 1175 | struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
ab34a7e8 | 1176 | struct intel_fbc *fbc = &dev_priv->fbc; |
d029bcad PZ |
1177 | |
1178 | if (!fbc_supported(dev_priv)) | |
1179 | return; | |
1180 | ||
ab34a7e8 | 1181 | mutex_lock(&fbc->lock); |
4da45616 | 1182 | if (fbc->crtc == crtc) |
d029bcad | 1183 | __intel_fbc_disable(dev_priv); |
ab34a7e8 | 1184 | mutex_unlock(&fbc->lock); |
65c7600f PZ |
1185 | |
1186 | cancel_work_sync(&fbc->work.work); | |
d029bcad PZ |
1187 | } |
1188 | ||
1189 | /** | |
c937ab3e | 1190 | * intel_fbc_global_disable - globally disable FBC |
d029bcad PZ |
1191 | * @dev_priv: i915 device instance |
1192 | * | |
1193 | * This function disables FBC regardless of which CRTC is associated with it. | |
1194 | */ | |
c937ab3e | 1195 | void intel_fbc_global_disable(struct drm_i915_private *dev_priv) |
d029bcad | 1196 | { |
ab34a7e8 PZ |
1197 | struct intel_fbc *fbc = &dev_priv->fbc; |
1198 | ||
d029bcad PZ |
1199 | if (!fbc_supported(dev_priv)) |
1200 | return; | |
1201 | ||
ab34a7e8 PZ |
1202 | mutex_lock(&fbc->lock); |
1203 | if (fbc->enabled) | |
d029bcad | 1204 | __intel_fbc_disable(dev_priv); |
ab34a7e8 | 1205 | mutex_unlock(&fbc->lock); |
65c7600f PZ |
1206 | |
1207 | cancel_work_sync(&fbc->work.work); | |
d029bcad PZ |
1208 | } |
1209 | ||
61a585d6 PZ |
1210 | static void intel_fbc_underrun_work_fn(struct work_struct *work) |
1211 | { | |
1212 | struct drm_i915_private *dev_priv = | |
1213 | container_of(work, struct drm_i915_private, fbc.underrun_work); | |
1214 | struct intel_fbc *fbc = &dev_priv->fbc; | |
1215 | ||
1216 | mutex_lock(&fbc->lock); | |
1217 | ||
1218 | /* Maybe we were scheduled twice. */ | |
1219 | if (fbc->underrun_detected) | |
1220 | goto out; | |
1221 | ||
1222 | DRM_DEBUG_KMS("Disabling FBC due to FIFO underrun.\n"); | |
1223 | fbc->underrun_detected = true; | |
1224 | ||
1225 | intel_fbc_deactivate(dev_priv); | |
1226 | out: | |
1227 | mutex_unlock(&fbc->lock); | |
1228 | } | |
1229 | ||
1230 | /** | |
1231 | * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun | |
1232 | * @dev_priv: i915 device instance | |
1233 | * | |
1234 | * Without FBC, most underruns are harmless and don't really cause too many | |
1235 | * problems, except for an annoying message on dmesg. With FBC, underruns can | |
1236 | * become black screens or even worse, especially when paired with bad | |
1237 | * watermarks. So in order for us to be on the safe side, completely disable FBC | |
1238 | * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe | |
1239 | * already suggests that watermarks may be bad, so try to be as safe as | |
1240 | * possible. | |
1241 | * | |
1242 | * This function is called from the IRQ handler. | |
1243 | */ | |
1244 | void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) | |
1245 | { | |
1246 | struct intel_fbc *fbc = &dev_priv->fbc; | |
1247 | ||
1248 | if (!fbc_supported(dev_priv)) | |
1249 | return; | |
1250 | ||
1251 | /* There's no guarantee that underrun_detected won't be set to true | |
1252 | * right after this check and before the work is scheduled, but that's | |
1253 | * not a problem since we'll check it again under the work function | |
1254 | * while FBC is locked. This check here is just to prevent us from | |
1255 | * unnecessarily scheduling the work, and it relies on the fact that we | |
1256 | * never switch underrun_detect back to false after it's true. */ | |
1257 | if (READ_ONCE(fbc->underrun_detected)) | |
1258 | return; | |
1259 | ||
1260 | schedule_work(&fbc->underrun_work); | |
1261 | } | |
1262 | ||
010cf73d PZ |
1263 | /** |
1264 | * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking | |
1265 | * @dev_priv: i915 device instance | |
1266 | * | |
1267 | * The FBC code needs to track CRTC visibility since the older platforms can't | |
1268 | * have FBC enabled while multiple pipes are used. This function does the | |
1269 | * initial setup at driver load to make sure FBC is matching the real hardware. | |
1270 | */ | |
1271 | void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv) | |
1272 | { | |
1273 | struct intel_crtc *crtc; | |
1274 | ||
1275 | /* Don't even bother tracking anything if we don't need. */ | |
1276 | if (!no_fbc_on_multiple_pipes(dev_priv)) | |
1277 | return; | |
1278 | ||
91c8a326 | 1279 | for_each_intel_crtc(&dev_priv->drm, crtc) |
525b9311 | 1280 | if (intel_crtc_active(crtc) && |
1d4258db | 1281 | crtc->base.primary->state->visible) |
010cf73d PZ |
1282 | dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); |
1283 | } | |
1284 | ||
80788a0f PZ |
1285 | /* |
1286 | * The DDX driver changes its behavior depending on the value it reads from | |
1287 | * i915.enable_fbc, so sanitize it by translating the default value into either | |
1288 | * 0 or 1 in order to allow it to know what's going on. | |
1289 | * | |
1290 | * Notice that this is done at driver initialization and we still allow user | |
1291 | * space to change the value during runtime without sanitizing it again. IGT | |
1292 | * relies on being able to change i915.enable_fbc at runtime. | |
1293 | */ | |
1294 | static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) | |
1295 | { | |
1296 | if (i915.enable_fbc >= 0) | |
1297 | return !!i915.enable_fbc; | |
1298 | ||
36dbc4d7 CW |
1299 | if (!HAS_FBC(dev_priv)) |
1300 | return 0; | |
1301 | ||
fd7d6c5c | 1302 | if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) |
80788a0f PZ |
1303 | return 1; |
1304 | ||
1305 | return 0; | |
1306 | } | |
1307 | ||
36dbc4d7 CW |
1308 | static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) |
1309 | { | |
36dbc4d7 | 1310 | /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ |
80debff8 | 1311 | if (intel_vtd_active() && |
36dbc4d7 CW |
1312 | (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { |
1313 | DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); | |
1314 | return true; | |
1315 | } | |
36dbc4d7 CW |
1316 | |
1317 | return false; | |
1318 | } | |
1319 | ||
94b83957 RV |
/**
 * intel_fbc_init - Initialize FBC
 * @dev_priv: the i915 device
 *
 * This function might be called during PM init process.
 */
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
	struct intel_fbc *fbc = &dev_priv->fbc;
	enum pipe pipe;

	INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
	INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
	mutex_init(&fbc->lock);
	fbc->enabled = false;
	fbc->active = false;
	fbc->work.scheduled = false;

	/* Clearing has_fbc must happen before the HAS_FBC() check below. */
	if (need_fbc_vtd_wa(dev_priv))
		mkwrite_device_info(dev_priv)->has_fbc = false;

	i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);

	if (!HAS_FBC(dev_priv)) {
		fbc->no_fbc_reason = "unsupported by this chipset";
		return;
	}

	/* On platforms where FBC is tied to pipe A, only track that pipe. */
	for_each_pipe(dev_priv, pipe) {
		fbc->possible_framebuffer_bits |=
			INTEL_FRONTBUFFER_PRIMARY(pipe);

		if (fbc_on_pipe_a_only(dev_priv))
			break;
	}

	/* This value was pulled out of someone's hat */
	if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);

	/* We still don't have any sort of hardware state readout for FBC, so
	 * deactivate it in case the BIOS activated it to make sure software
	 * matches the hardware state. */
	if (intel_fbc_hw_is_active(dev_priv))
		intel_fbc_hw_deactivate(dev_priv);
}