/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/**
 * DOC: fence register handling
 *
 * Important to avoid confusion: "fences" in the i915 driver are not execution
 * fences used to track command completion but hardware detiler objects which
 * wrap a given range of the global GTT. Each platform has only a fairly
 * limited set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
 * have their own tiling state bits and don't need fences.
 *
 * Also note that fences only support X and Y tiling and hence can't be used for
 * the fancier new tiling formats like W, Ys and Yf.
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore fence state is committed to
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code
 * must explicitly call i915_vma_get_fence() to synchronize fencing status
 * for cpu access. Also note that some code wants an unfenced view, for those
 * cases the fence can be removed forcefully with i915_vma_put_fence().
 *
 * Internally these functions will synchronize with userspace access by removing
 * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
 */

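/*
 * Illustrative usage sketch (documentation only, not code from this file):
 * a kernel user that wants detiled access through the GTT acquires a fence
 * on an already pinned vma, holding struct_mutex and a runtime-pm wakeref
 * as the functions below require:
 *
 *	ret = i915_vma_get_fence(vma);
 *	if (ret)
 *		return ret;
 *	...access the object through its fenced GTT mapping...
 *
 * and can later force an untiled view with i915_vma_put_fence(vma).
 */
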
#define pipelined 0

static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	i915_reg_t fence_reg_lo, fence_reg_hi;
	int fence_pitch_shift;
	u64 val;

	if (INTEL_INFO(fence->i915)->gen >= 6) {
		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
	} else {
		fence_reg_lo = FENCE_REG_965_LO(fence->id);
		fence_reg_hi = FENCE_REG_965_HI(fence->id);
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);
		u32 row_size = stride * (is_y_tiled ? 32 : 8);
		u32 size = rounddown((u32)vma->node.size, row_size);

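		/* The upper dword holds the page address of the end of the
		 * fenced range, the lower dword the start page address, the
		 * pitch in 128 byte units (minus one), the tiling mode and
		 * the enable bit.
		 */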
		val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
		val |= vma->node.start & 0xfffff000;
		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (is_y_tiled)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct drm_i915_private *dev_priv = fence->i915;

		/* To w/a incoherency with non-atomic 64-bit register updates,
		 * we split the 64-bit update into two 32-bit writes. In order
		 * for a partial fence not to be evaluated between writes, we
		 * precede the update with a write to turn off the fence
		 * register, and only enable the fence as the last step.
		 *
		 * For extra levels of paranoia, we make sure each step lands
		 * before applying the next step.
		 */
		I915_WRITE(fence_reg_lo, 0);
		POSTING_READ(fence_reg_lo);

		I915_WRITE(fence_reg_hi, upper_32_bits(val));
		I915_WRITE(fence_reg_lo, lower_32_bits(val));
		POSTING_READ(fence_reg_lo);
	}
}

static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);
		int pitch_val;
		int tile_width;

		WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
		     !is_power_of_2(vma->node.size) ||
		     (vma->node.start & (vma->node.size - 1)),
		     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
		     vma->node.start,
		     i915_vma_is_map_and_fenceable(vma),
		     vma->node.size);

		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
			tile_width = 128;
		else
			tile_width = 512;

		/* Note: pitch better be a power of two tile widths */
		pitch_val = stride / tile_width;
		pitch_val = ffs(pitch_val) - 1;

		val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I915_FENCE_SIZE_BITS(vma->node.size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct drm_i915_private *dev_priv = fence->i915;
		i915_reg_t reg = FENCE_REG(fence->id);

		I915_WRITE(reg, val);
		POSTING_READ(reg);
	}
}

static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);
		u32 pitch_val;

		WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
		     !is_power_of_2(vma->node.size) ||
		     (vma->node.start & (vma->node.size - 1)),
		     "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
		     vma->node.start, vma->node.size);

		pitch_val = stride / 128;
		pitch_val = ffs(pitch_val) - 1;

		val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I830_FENCE_SIZE_BITS(vma->node.size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct drm_i915_private *dev_priv = fence->i915;
		i915_reg_t reg = FENCE_REG(fence->id);

		I915_WRITE(reg, val);
		POSTING_READ(reg);
	}
}

static void fence_write(struct drm_i915_fence_reg *fence,
			struct i915_vma *vma)
{
	/* Previous access through the fence register is marshalled by
	 * the mb() inside the fault handlers (i915_gem_release_mmap())
	 * and explicitly managed for internal users.
	 */

	if (IS_GEN2(fence->i915))
		i830_write_fence_reg(fence, vma);
	else if (IS_GEN3(fence->i915))
		i915_write_fence_reg(fence, vma);
	else
		i965_write_fence_reg(fence, vma);

	/* Access through the fenced region afterwards is
	 * ordered by the posting reads whilst writing the registers.
	 */

	fence->dirty = false;
}

static int fence_update(struct drm_i915_fence_reg *fence,
			struct i915_vma *vma)
{
	int ret;

	if (vma) {
		if (!i915_vma_is_map_and_fenceable(vma))
			return -EINVAL;

		if (WARN(!i915_gem_object_get_stride(vma->obj) ||
			 !i915_gem_object_get_tiling(vma->obj),
			 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
			 i915_gem_object_get_stride(vma->obj),
			 i915_gem_object_get_tiling(vma->obj)))
			return -EINVAL;

		ret = i915_gem_active_retire(&vma->last_fence,
					     &vma->obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	if (fence->vma) {
		ret = i915_gem_active_retire(&fence->vma->last_fence,
					     &fence->vma->obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	if (fence->vma && fence->vma != vma) {
		/* Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
		i915_gem_release_mmap(fence->vma->obj);

		fence->vma->fence = NULL;
		fence->vma = NULL;

		list_move(&fence->link, &fence->i915->mm.fence_list);
	}

	fence_write(fence, vma);

	if (vma) {
		if (fence->vma != vma) {
			vma->fence = fence;
			fence->vma = vma;
		}

		list_move_tail(&fence->link, &fence->i915->mm.fence_list);
	}

	return 0;
}

/**
 * i915_vma_put_fence - force-remove fence for a vma
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given vma, which is useful
 * if the kernel wants to do untiled GTT access.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int
i915_vma_put_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence = vma->fence;

	assert_rpm_wakelock_held(to_i915(vma->vm->dev));

	if (!fence)
		return 0;

	if (fence->pin_count)
		return -EBUSY;

	return fence_update(fence, NULL);
}

static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
{
	struct drm_i915_fence_reg *fence;

	list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
		if (fence->pin_count)
			continue;

		return fence;
	}

	/* Wait for completion of pending flips which consume fences */
	if (intel_has_pending_fb_unpin(&dev_priv->drm))
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-EDEADLK);
}

/**
 * i915_vma_get_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @vma,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int
i915_vma_get_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence;
	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;

	/* Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
	 */
	assert_rpm_wakelock_held(to_i915(vma->vm->dev));

	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		if (!fence->dirty) {
			list_move_tail(&fence->link,
				       &fence->i915->mm.fence_list);
			return 0;
		}
	} else if (set) {
		fence = fence_find(to_i915(vma->vm->dev));
		if (IS_ERR(fence))
			return PTR_ERR(fence);
	} else
		return 0;

	return fence_update(fence, set);
}

/**
 * i915_gem_restore_fences - restore fence state
 * @dev: DRM device
 *
 * Restore the hw fence state to match the software tracking again, to be
 * called after a gpu reset and on resume. Note that on runtime suspend we
 * only cancel the fences, to be reacquired by the user later.
 */
void i915_gem_restore_fences(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		struct i915_vma *vma = reg->vma;

		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
		 */
		if (vma && !i915_gem_object_is_tiled(vma->obj)) {
			GEM_BUG_ON(!reg->dirty);
			GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));

			list_move(&reg->link, &dev_priv->mm.fence_list);
			vma->fence = NULL;
			vma = NULL;
		}

		fence_write(reg, vma);
		reg->vma = vma;
	}
}

/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled. However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y. So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics. This
 * is called "Channel XOR Randomization" in the MCH documentation. The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */

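/*
 * Illustrative sketch (not used by the driver): how a CPU-side copy routine
 * would apply the common I915_BIT_6_SWIZZLE_9_10 mode to a linear byte
 * offset, i.e. XOR bits 9 and 10 into bit 6. Shifting bit 9 right by 3 and
 * bit 10 right by 4 lands both on bit 6 (0x40):
 *
 *	static u32 swizzle_offset_9_10(u32 offset)
 *	{
 *		return offset ^ (((offset >> 3) ^ (offset >> 4)) & 0x40);
 *	}
 *
 * For I915_BIT_6_SWIZZLE_9 only bit 9 participates:
 *
 *	offset ^= (offset >> 3) & 0x40;
 */
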
/**
 * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @dev: DRM device
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		if (dev_priv->preserve_bios_swizzle) {
			if (I915_READ(DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			uint32_t dimm_c0, dimm_c1;
			dimm_c0 = I915_READ(MAD_DIMM_C0);
			dimm_c1 = I915_READ(MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/* Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no cpu with gpu attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway. */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN5(dev_priv)) {
		/* On Ironlake the GPU always uses the same swizzling setup,
		 * whatever the DRAM configuration.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev_priv)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
		   !IS_G33(dev_priv))) {
		uint32_t dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU do swizzling. For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled. The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}

		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN4(dev_priv) &&
		    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}

		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR. "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration. It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B   1-ch   2-ch
		 * 512  0    0    0    512    0     O
		 * 512  0    512  0    16     1008  X
		 * 512  0    0    512  16     1008  X
		 * 0    512  0    512  16     1008  X
		 * 1024 1024 1024 0    2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 *
		 * Reports indicate that the swizzling actually
		 * varies depending upon page placement inside the
		 * channels, i.e. we see swizzled pages where the
		 * banks of memory are paired and unswizzled on the
		 * uneven portion, so leave that as unknown.
		 */
		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
		/* Userspace likes to explode if it sees unknown swizzling,
		 * so lie. We will finish the lie when reporting through
		 * the get-tiling-ioctl by reporting the physical swizzle
		 * mode as unknown instead.
		 *
		 * As we don't strictly know what the swizzling is, it may be
		 * bit17 dependent, and so we need to also prevent the pages
		 * from being moved.
		 */
		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

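	/* When bit 17 of the physical address flips, bit 6 of the swizzled
	 * address flips with it, so the two 64 byte halves of every 128 byte
	 * pair trade places.
	 */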
	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}

/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;
		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
		i++;
	}
}

/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
				      sizeof(long), GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 record\n");
			return;
		}
	}

	i = 0;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}
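
/*
 * Illustrative pairing (a sketch for documentation, not code from this
 * file): a caller that drops and reacquires an object's backing storage
 * records bit 17 first and replays the fixup once the pages are pinned
 * again:
 *
 *	i915_gem_object_save_bit_17_swizzle(obj, pages);
 *	...pages are unpinned and may move or be swapped out...
 *	i915_gem_object_do_bit_17_swizzle(obj, pages);
 */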