// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>

#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"
#include "intel_gt.h"
#include "intel_gtt.h"
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_lmem(vm->i915, sz, 0);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj))
		obj->base.resv = &vm->resv;

	return obj;
}
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
{
	struct drm_i915_gem_object *obj;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	obj = i915_gem_object_create_internal(vm->i915, sz);
	/*
	 * Ensure all paging structures for this vm share the same dma-resv
	 * object underneath, with the idea that one object_lock() will lock
	 * them all at once.
	 */
	if (!IS_ERR(obj))
		obj->base.resv = &vm->resv;

	return obj;
}
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
	enum i915_map_type type;
	void *vaddr;

	type = i915_coherent_map_type(vm->i915, obj, true);
	vaddr = i915_gem_object_pin_map(obj, type);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	i915_gem_object_make_unshrinkable(obj);
	return 0;
}
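/*
 * Minimal usage sketch (hypothetical caller, not part of the driver):
 * paging-structure allocations pair vm->alloc_pt_dma() with map_pt_dma()
 * and drop the object again on failure.
 */
static int __maybe_unused example_alloc_and_map_pt(struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = map_pt_dma(vm, obj);
	if (err)
		i915_gem_object_put(obj);

	return err;
}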
void __i915_vm_close(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;

	if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
		return;

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		/* Keep the obj (and hence the vma) alive as _we_ destroy it */
		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
	}
	GEM_BUG_ON(!list_empty(&vm->bound_list));

	mutex_unlock(&vm->mutex);
}
/* lock the vm into the current ww, if we lock one, we lock all */
int i915_vm_lock_objects(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww)
{
	if (vm->scratch[0]->base.resv == &vm->resv) {
		return i915_gem_object_lock(vm->scratch[0], ww);
	} else {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		/* We borrowed the scratch page from ggtt, take the top level object */
		return i915_gem_object_lock(ppgtt->pd->pt.base, ww);
	}
}
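/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * because every paging structure shares the same dma-resv, one
 * i915_vm_lock_objects() call inside the usual ww backoff loop locks
 * them all.
 */
static int __maybe_unused example_lock_vm(struct i915_address_space *vm)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_vm_lock_objects(vm, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);

	return err;
}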
void i915_address_space_fini(struct i915_address_space *vm)
{
	drm_mm_takedown(&vm->mm);
	mutex_destroy(&vm->mutex);
}
static void __i915_vm_release(struct work_struct *work)
{
	struct i915_address_space *vm =
		container_of(work, struct i915_address_space, rcu.work);

	vm->cleanup(vm);
	i915_address_space_fini(vm);
	dma_resv_fini(&vm->resv);

	kfree(vm);
}
void i915_vm_release(struct kref *kref)
{
	struct i915_address_space *vm =
		container_of(kref, struct i915_address_space, ref);

	GEM_BUG_ON(i915_is_ggtt(vm));
	trace_i915_ppgtt_release(vm);

	queue_rcu_work(vm->i915->wq, &vm->rcu);
}
void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
	kref_init(&vm->ref);
	INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
	atomic_set(&vm->open, 1);

	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
	lockdep_set_subclass(&vm->mutex, subclass);
	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
	dma_resv_init(&vm->resv);

	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->bound_list);
}
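/*
 * For illustration: the reclaim tainting above primes lockdep with the
 * ordering "vm->mutex may be taken inside reclaim", so any allocation
 * made while holding the mutex is flagged immediately. A rough sketch
 * of what i915_gem_shrinker_taints_mutex() does (assumption, based on
 * its definition in i915_gem_shrinker.c):
 *
 *	fs_reclaim_acquire(GFP_KERNEL);
 *	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
 *	mutex_release(&mutex->dep_map, _RET_IP_);
 *	fs_reclaim_release(GFP_KERNEL);
 */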
void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
void *__px_vaddr(struct drm_i915_gem_object *p)
{
	enum i915_map_type type;

	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return page_unpack_bits(p->mm.mapping, &type);
}
dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_dma_address(p->mm.pages->sgl);
}
struct page *__px_page(struct drm_i915_gem_object *p)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(p));
	return sg_page(p->mm.pages->sgl);
}
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
	void *vaddr = __px_vaddr(p);

	memset64(vaddr, val, count);
	clflush_cache_range(vaddr, PAGE_SIZE);
}
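/*
 * Usage sketch: page-table code typically initializes a fresh table by
 * pointing every entry at scratch, e.g. via the fill_px() wrapper
 * (assumed here, per intel_gtt.h, where it expands to
 * fill_page_dma(px_base(px), v, PAGE_SIZE / sizeof(u64))):
 *
 *	fill_px(pt, vm->scratch[0]->encode);
 */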
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
	void *vaddr = __px_vaddr(scratch);
	u8 val;

	val = 0;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		val = POISON_FREE;

	memset(vaddr, val, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
{
	unsigned long size;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64k scratch page
	 * for all.
	 */
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K))
		size = I915_GTT_PAGE_SIZE_64K;

	do {
		struct drm_i915_gem_object *obj;

		obj = vm->alloc_pt_dma(vm, size);
		if (IS_ERR(obj))
			goto skip;

		if (map_pt_dma(vm, obj))
			goto skip_obj;

		/* We need a single contiguous page for our scratch */
		if (obj->mm.page_sizes.sg < size)
			goto skip_obj;

		/* And it needs to be correspondingly aligned */
		if (__px_dma(obj) & (size - 1))
			goto skip_obj;

		/*
		 * Use a non-zero scratch page for debugging.
		 *
		 * We want a value that should be reasonably obvious
		 * to spot in the error state, while also causing a GPU hang
		 * if executed. We prefer using a clear page in production, so
		 * should it ever be accidentally used, the effect should be
		 * fairly benign.
		 */
		poison_scratch_page(obj);

		vm->scratch[0] = obj;
		vm->scratch_order = get_order(size);
		return 0;

skip_obj:
		i915_gem_object_put(obj);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
	} while (1);
}
void free_scratch(struct i915_address_space *vm)
{
	int i;

	for (i = 0; i <= vm->top; i++)
		i915_gem_object_put(vm->scratch[i]);
}
void gtt_write_workarounds(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;

	/*
	 * This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(i915))
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11)
		intel_uncore_write(uncore,
				   GEN8_L3_LRA_1_GPGPU,
				   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K) &&
	    INTEL_GEN(i915) <= 10)
		intel_uncore_rmw(uncore,
				 GEN8_GAMW_ECO_DEV_RW_IA,
				 0,
				 GAMW_ECO_ENABLE_64K_IPS_FIELD);

	if (IS_GEN_RANGE(i915, 8, 11)) {
		bool can_use_gtt_cache = true;

		/*
		 * According to the BSpec if we use 2M/1G pages then we also
		 * need to disable the GTT cache. At least on BDW we can see
		 * visual corruption when using 2M pages, and not disabling the
		 * GTT cache.
		 */
		if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_2M))
			can_use_gtt_cache = false;

		/* WaGttCachingOffByDefault */
		intel_uncore_write(uncore,
				   HSW_GTT_CACHE_EN,
				   can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
		drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
				 intel_uncore_read(uncore,
						   HSW_GTT_CACHE_EN) == 0);
	}
}
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
	/* TGL doesn't support LLC or AGE settings */
	intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB);
	intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
}
static void cnl_setup_private_ppat(struct intel_uncore *uncore)
{
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(0),
			   GEN8_PPAT_WB | GEN8_PPAT_LLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(1),
			   GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(2),
			   GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(3),
			   GEN8_PPAT_UC);
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(4),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(5),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(6),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	intel_uncore_write(uncore,
			   GEN10_PAT_INDEX(7),
			   GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}
/*
 * The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases.
 */
static void bdw_setup_private_ppat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;
	u64 pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |	/* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |	/* for something pointing to ptes? */
	      GEN8_PPAT(3, GEN8_PPAT_UC) |			/* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	/* for scanout with eLLC */
	if (INTEL_GEN(i915) >= 9)
		pat |= GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_ELLC_OVERRIDE);
	else
		pat |= GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
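/*
 * For reference (assumption, based on the GEN8_PPAT() definition in
 * intel_gtt.h): GEN8_PPAT(i, x) packs attribute byte x into byte i of
 * the 64-bit PAT value, i.e. (u64)(x) << ((i) * 8), so entries 0-3
 * land in GEN8_PRIVATE_PAT_LO and entries 4-7 in GEN8_PRIVATE_PAT_HI.
 */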
static void chv_setup_private_ppat(struct intel_uncore *uncore)
{
	u64 pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */

	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}
void setup_private_pat(struct intel_uncore *uncore)
{
	struct drm_i915_private *i915 = uncore->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 8);

	if (INTEL_GEN(i915) >= 12)
		tgl_setup_private_ppat(uncore);
	else if (INTEL_GEN(i915) >= 10)
		cnl_setup_private_ppat(uncore);
	else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
		chv_setup_private_ppat(uncore);
	else
		bdw_setup_private_ppat(uncore);
}
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	return vma;
}
struct i915_vma *
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
{
	struct i915_vma *vma;
	int err;

	vma = __vm_create_scratch_for_read(vm, size);
	if (IS_ERR(vma))
		return vma;

	err = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (err) {
		i915_vma_put(vma);
		return ERR_PTR(err);
	}

	return vma;
}
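/*
 * Minimal usage sketch (hypothetical selftest-style caller): create a
 * pinned scratch vma for the GPU to write into, then release it with
 * i915_vma_unpin() and i915_vma_put() once the readback is done.
 */
static int __maybe_unused example_read_scratch(struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = __vm_create_scratch_for_read_pinned(vm, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... emit GPU work targeting vma, wait, read back via a CPU map ... */

	i915_vma_unpin(vma);
	i915_vma_put(vma);

	return 0;
}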
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#endif