drivers/gpu/drm/i915/i915_gem_fence_reg.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/**
 * DOC: fence register handling
 *
 * Important to avoid confusion: "fences" in the i915 driver are not execution
 * fences used to track command completion but hardware detiler objects which
 * wrap a given range of the global GTT. Each platform has only a fairly limited
 * set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
 * have their own tiling state bits and don't need fences.
 *
 * Also note that fences only support X and Y tiling and hence can't be used for
 * the fancier new tiling formats like W, Ys and Yf.
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore fence state is committed to
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
 * explicitly call i915_vma_get_fence() to synchronize fencing status
 * for cpu access. Also note that some code wants an unfenced view, for those
 * cases the fence can be removed forcefully with i915_vma_put_fence().
 *
 * Internally these functions will synchronize with userspace access by removing
 * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
 */

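/*
 * A minimal usage sketch (illustrative only, assuming the caller already
 * holds struct_mutex and a runtime PM wakeref):
 *
 *	err = i915_vma_get_fence(vma);
 *	if (err == 0)
 *		... access the object through its fenced GTT mmap ...
 *
 *	err = i915_vma_put_fence(vma);	(back to an unfenced view)
 *
 * Note that i915_vma_put_fence() fails with -EBUSY while the fence is
 * still pinned.
 */
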
#define pipelined 0

static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	i915_reg_t fence_reg_lo, fence_reg_hi;
	int fence_pitch_shift;
	u64 val;

	if (INTEL_INFO(fence->i915)->gen >= 6) {
		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;

	} else {
		fence_reg_lo = FENCE_REG_965_LO(fence->id);
		fence_reg_hi = FENCE_REG_965_HI(fence->id);
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);
		u32 row_size = stride * (is_y_tiled ? 32 : 8);
		u32 size = rounddown((u32)vma->node.size, row_size);

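		/*
		 * Roughly, the 965+ fence register packs the address of the
		 * last page covered by the fence (start + size - 4096) into
		 * the upper 32 bits and the start address into the lower 32
		 * bits, together with the pitch in 128 byte units (minus one),
		 * the Y-tile bit and the valid bit, as composed below.
		 */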
		val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
		val |= vma->node.start & 0xfffff000;
		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (is_y_tiled)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct drm_i915_private *dev_priv = fence->i915;

		/* To w/a incoherency with non-atomic 64-bit register updates,
		 * we split the 64-bit update into two 32-bit writes. In order
		 * for a partial fence not to be evaluated between writes, we
		 * precede the update with a write to turn off the fence
		 * register, and only enable the fence as the last step.
		 *
		 * For extra levels of paranoia, we make sure each step lands
		 * before applying the next step.
		 */
		I915_WRITE(fence_reg_lo, 0);
		POSTING_READ(fence_reg_lo);

		I915_WRITE(fence_reg_hi, upper_32_bits(val));
		I915_WRITE(fence_reg_lo, lower_32_bits(val));
		POSTING_READ(fence_reg_lo);
	}
}

static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);
		int pitch_val;
		int tile_width;

		WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
		     !is_power_of_2(vma->node.size) ||
		     (vma->node.start & (vma->node.size - 1)),
		     "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
		     vma->node.start,
		     i915_vma_is_map_and_fenceable(vma),
		     vma->node.size);

		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
			tile_width = 128;
		else
			tile_width = 512;

		/* Note: pitch better be a power of two tile widths */
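		/* The register encodes log2 of the pitch in tile widths;
		 * for a power of two, ffs(x) - 1 == log2(x).
		 */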
		pitch_val = stride / tile_width;
		pitch_val = ffs(pitch_val) - 1;

		val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I915_FENCE_SIZE_BITS(vma->node.size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct drm_i915_private *dev_priv = fence->i915;
		i915_reg_t reg = FENCE_REG(fence->id);

		I915_WRITE(reg, val);
		POSTING_READ(reg);
	}
}

static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);
		u32 pitch_val;

		WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
		     !is_power_of_2(vma->node.size) ||
		     (vma->node.start & (vma->node.size - 1)),
		     "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
		     vma->node.start, vma->node.size);

		pitch_val = stride / 128;
		pitch_val = ffs(pitch_val) - 1;

		val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I830_FENCE_SIZE_BITS(vma->node.size);
		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct drm_i915_private *dev_priv = fence->i915;
		i915_reg_t reg = FENCE_REG(fence->id);

		I915_WRITE(reg, val);
		POSTING_READ(reg);
	}
}

static void fence_write(struct drm_i915_fence_reg *fence,
			struct i915_vma *vma)
{
	/* Previous access through the fence register is marshalled by
	 * the mb() inside the fault handlers (i915_gem_release_mmap)
	 * and explicitly managed for internal users.
	 */

	if (IS_GEN2(fence->i915))
		i830_write_fence_reg(fence, vma);
	else if (IS_GEN3(fence->i915))
		i915_write_fence_reg(fence, vma);
	else
		i965_write_fence_reg(fence, vma);

	/* Access through the fenced region afterwards is
	 * ordered by the posting reads whilst writing the registers.
	 */

	fence->dirty = false;
}

static int fence_update(struct drm_i915_fence_reg *fence,
			struct i915_vma *vma)
{
	int ret;

	if (vma) {
		if (!i915_vma_is_map_and_fenceable(vma))
			return -EINVAL;

		if (WARN(!i915_gem_object_get_stride(vma->obj) ||
			 !i915_gem_object_get_tiling(vma->obj),
			 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
			 i915_gem_object_get_stride(vma->obj),
			 i915_gem_object_get_tiling(vma->obj)))
			return -EINVAL;

		ret = i915_gem_active_retire(&vma->last_fence,
					     &vma->obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	if (fence->vma) {
		ret = i915_gem_active_retire(&fence->vma->last_fence,
					     &fence->vma->obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	if (fence->vma && fence->vma != vma) {
		/* Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
		i915_gem_release_mmap(fence->vma->obj);

		fence->vma->fence = NULL;
		fence->vma = NULL;

		list_move(&fence->link, &fence->i915->mm.fence_list);
	}

	fence_write(fence, vma);

	if (vma) {
		if (fence->vma != vma) {
			vma->fence = fence;
			fence->vma = vma;
		}

		list_move_tail(&fence->link, &fence->i915->mm.fence_list);
	}

	return 0;
}

/**
 * i915_vma_put_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given vma, which is useful
 * if the kernel wants to do untiled GTT access.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int
i915_vma_put_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence = vma->fence;

	assert_rpm_wakelock_held(to_i915(vma->vm->dev));

	if (!fence)
		return 0;

	if (fence->pin_count)
		return -EBUSY;

	return fence_update(fence, NULL);
}

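/* The fence_list is kept in LRU order: fences in active use are moved to the
 * tail (see fence_update() and i915_vma_get_fence()), so the walk below picks
 * the least recently used fence that is not currently pinned.
 */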
49ef5294 304static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
41a36b73 305{
49ef5294 306 struct drm_i915_fence_reg *fence;
41a36b73 307
49ef5294
CW
308 list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
309 if (fence->pin_count)
41a36b73
DV
310 continue;
311
49ef5294 312 return fence;
41a36b73
DV
313 }
314
41a36b73 315 /* Wait for completion of pending flips which consume fences */
49ef5294 316 if (intel_has_pending_fb_unpin(&dev_priv->drm))
41a36b73
DV
317 return ERR_PTR(-EAGAIN);
318
319 return ERR_PTR(-EDEADLK);
320}

/**
 * i915_vma_get_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @vma,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int
i915_vma_get_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence;
	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;

	/* Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
	 */
	assert_rpm_wakelock_held(to_i915(vma->vm->dev));

	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		if (!fence->dirty) {
			list_move_tail(&fence->link,
				       &fence->i915->mm.fence_list);
			return 0;
		}
	} else if (set) {
		fence = fence_find(to_i915(vma->vm->dev));
		if (IS_ERR(fence))
			return PTR_ERR(fence);
	} else
		return 0;

	return fence_update(fence, set);
}

/**
 * i915_gem_restore_fences - restore fence state
 * @dev_priv: i915 device private
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		struct i915_vma *vma = reg->vma;

		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
		 */
		if (vma && !i915_gem_object_is_tiled(vma->obj)) {
			GEM_BUG_ON(!reg->dirty);
			GEM_BUG_ON(!list_empty(&vma->obj->userfault_link));

			list_move(&reg->link, &dev_priv->mm.fence_list);
			vma->fence = NULL;
			vma = NULL;
		}

		fence_write(reg, vma);
		reg->vma = vma;
	}
}

/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled. However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y. So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics. This
 * is called "Channel XOR Randomization" in the MCH documentation. The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what address
 * swizzling it needs to do, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */

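/*
 * Illustrative sketch (not used by the code below): for the reported
 * I915_BIT_6_SWIZZLE_9_10 mode, bit 6 of a CPU linear address is XORed
 * with bits 9 and 10 before it reaches memory, i.e. roughly
 *
 *	addr ^= ((addr >> 3) ^ (addr >> 4)) & (1 << 6);
 *
 * and the _9_10_17 variant additionally XORs in bit 17 (addr >> 11).
 */
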
/**
 * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @dev_priv: i915 device private
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		if (dev_priv->preserve_bios_swizzle) {
			if (I915_READ(DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			uint32_t dimm_c0, dimm_c1;
			dimm_c0 = I915_READ(MAD_DIMM_C0);
			dimm_c1 = I915_READ(MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/* Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no cpu with gpu attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway. */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN5(dev_priv)) {
		/* On Ironlake, the GPU uses the same swizzling setup
		 * regardless of the DRAM configuration.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN2(dev_priv)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev_priv) || (IS_GEN3(dev_priv) &&
		   !IS_G33(dev_priv))) {
		uint32_t dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU do swizzling. For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled. The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}

		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN4(dev_priv) &&
		    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}

		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR. "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration. It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    0    0     512    0     O
		 * 512  0    512  0     16     1008  X
		 * 512  0    0    512   16     1008  X
		 * 0    512  0    512   16     1008  X
		 * 1024 1024 1024 0     2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 *
		 * Reports indicate that the swizzling actually
		 * varies depending upon page placement inside the
		 * channels, i.e. we see swizzled pages where the
		 * banks of memory are paired and unswizzled on the
		 * uneven portion, so leave that as unknown.
		 */
		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
		/* Userspace likes to explode if it sees unknown swizzling,
		 * so lie. We will finish the lie when reporting through
		 * the get-tiling-ioctl by reporting the physical swizzle
		 * mode as unknown instead.
		 *
		 * As we don't strictly know what the swizzling is, it may be
		 * bit17 dependent, and so we need to also prevent the pages
		 * from being moved.
		 */
		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

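	/* A flipped bit 17 inverts bit 6 of the swizzled address, so each
	 * 64-byte block trades places with its neighbour: walk the page in
	 * 128-byte steps and swap the two halves of every step.
	 */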
	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}

/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;
		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
		i++;
	}
}

/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
				      sizeof(long), GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 record\n");
			return;
		}
	}

	i = 0;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}