1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
6 #include <asm/set_memory.h>
8 #include <linux/types.h>
9 #include <linux/stop_machine.h>
11 #include <drm/i915_drm.h>
12 #include <drm/intel-gtt.h>
14 #include "gem/i915_gem_lmem.h"
16 #include "intel_ggtt_gmch.h"
18 #include "intel_gt_regs.h"
20 #include "i915_scatterlist.h"
21 #include "i915_utils.h"
22 #include "i915_vgpu.h"
24 #include "intel_gtt.h"
25 #include "gen8_ppgtt.h"
27 static inline bool suspend_retains_ptes(struct i915_address_space *vm)
29 return GRAPHICS_VER(vm->i915) >= 8 &&
30 !HAS_LMEM(vm->i915) &&
34 static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
39 if (i915_node_color_differs(node, color))
40 *start += I915_GTT_PAGE_SIZE;
43 * Also leave a space between the unallocated reserved node after the
44 * GTT and any objects within the GTT, i.e. we use the color adjustment
45 * to insert a guard page to prevent prefetches crossing over the
48 node = list_next_entry(node, node_list);
49 if (node->color != color)
50 *end -= I915_GTT_PAGE_SIZE;
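/*
 * Worked example with made-up addresses: for a candidate hole
 * [0x10000, 0x20000) whose neighbouring nodes both have a different colour,
 * the adjustments above yield [0x11000, 0x1f000), so whatever is inserted is
 * always separated from its neighbours by at least one unmapped 4K page that
 * a CS prefetch can run into harmlessly.
 */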
53 static int ggtt_init_hw(struct i915_ggtt *ggtt)
55 struct drm_i915_private *i915 = ggtt->vm.i915;
57 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
59 ggtt->vm.is_ggtt = true;
61 /* Only VLV supports read-only GGTT mappings */
62 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
64 if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
65 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
67 if (ggtt->mappable_end) {
68 if (!io_mapping_init_wc(&ggtt->iomap,
70 ggtt->mappable_end)) {
71 ggtt->vm.cleanup(&ggtt->vm);
75 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
79 intel_ggtt_init_fences(ggtt);
85 * i915_ggtt_init_hw - Initialize GGTT hardware
88 int i915_ggtt_init_hw(struct drm_i915_private *i915)
93 * Note that we use page colouring to enforce a guard page at the
94 * end of the address space. This is required as the CS may prefetch
95 * beyond the end of the batch buffer, across the page boundary,
96 * and beyond the end of the GTT if we do not provide a guard.
98 ret = ggtt_init_hw(to_gt(i915)->ggtt);
106 * Return the value of the last GGTT pte cast to a u64, if
107 * the system is supposed to retain ptes across resume. 0 otherwise.
109 static u64 read_last_pte(struct i915_address_space *vm)
111 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
112 gen8_pte_t __iomem *ptep;
114 if (!suspend_retains_ptes(vm))
117 GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8);
118 ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1);
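/*
 * The value read back here is stashed in ggtt->probed_pte at suspend time and
 * compared against a fresh read in i915_ggtt_resume_vm(); a mismatch (or an
 * explicit i915_ggtt_mark_pte_lost()) means the PTEs did not survive, and the
 * whole range is cleared back to scratch before the VMAs are rebound.
 */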
123 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
124 * @vm: The VM to suspend the mappings for
126 * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
129 void i915_ggtt_suspend_vm(struct i915_address_space *vm)
131 struct i915_vma *vma, *vn;
132 int save_skip_rewrite;
134 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
137 i915_gem_drain_freed_objects(vm->i915);
139 mutex_lock(&vm->mutex);
142 * Skip rewriting PTE on VMA unbind.
143 * FIXME: Use an argument to i915_vma_unbind() instead?
145 save_skip_rewrite = vm->skip_pte_rewrite;
146 vm->skip_pte_rewrite = true;
148 list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
149 struct drm_i915_gem_object *obj = vma->obj;
151 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
153 if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
156 /* unlikely to race when GPU is idle, so no worry about slowpath.. */
157 if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
159 * No dead objects should appear here, GPU should be
160 * completely idle, and userspace suspended
162 i915_gem_object_get(obj);
164 mutex_unlock(&vm->mutex);
166 i915_gem_object_lock(obj, NULL);
167 GEM_WARN_ON(i915_vma_unbind(vma));
168 i915_gem_object_unlock(obj);
169 i915_gem_object_put(obj);
171 vm->skip_pte_rewrite = save_skip_rewrite;
175 if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
176 i915_vma_wait_for_bind(vma);
178 __i915_vma_evict(vma, false);
179 drm_mm_remove_node(&vma->node);
182 i915_gem_object_unlock(obj);
185 if (!suspend_retains_ptes(vm))
186 vm->clear_range(vm, 0, vm->total);
188 i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);
190 vm->skip_pte_rewrite = save_skip_rewrite;
192 mutex_unlock(&vm->mutex);
195 void i915_ggtt_suspend(struct i915_ggtt *ggtt)
197 i915_ggtt_suspend_vm(&ggtt->vm);
198 ggtt->invalidate(ggtt);
200 intel_gt_check_and_clear_faults(ggtt->vm.gt);
203 void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
205 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
207 spin_lock_irq(&uncore->lock);
208 intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
209 intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
210 spin_unlock_irq(&uncore->lock);
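/*
 * The read back of GFX_FLSH_CNTL_GEN6 above serves as a posting read, making
 * sure the flush write has actually reached the hardware before the uncore
 * lock is released.
 */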
213 static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
215 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
218 * Note that as an uncached mmio write, this will flush the
219 * WCB of the writes into the GGTT before it triggers the invalidate.
221 intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
224 static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
226 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
227 struct drm_i915_private *i915 = ggtt->vm.i915;
229 gen8_ggtt_invalidate(ggtt);
231 if (GRAPHICS_VER(i915) >= 12)
232 intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
233 GEN12_GUC_TLB_INV_CR_INVALIDATE);
235 intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
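/*
 * GuC keeps its own TLB of GGTT translations, so on top of the plain gen8
 * invalidate above we also poke the GuC-specific invalidation register:
 * GEN12_GUC_TLB_INV_CR on gen12+, the older GEN8_GTCR write otherwise.
 */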
238 u64 gen8_ggtt_pte_encode(dma_addr_t addr,
239 enum i915_cache_level level,
242 gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
245 pte |= GEN12_GGTT_PTE_LM;
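/*
 * So a GGTT PTE for system memory is simply the dma address with
 * GEN8_PAGE_PRESENT set, while pages placed in device local memory also carry
 * GEN12_GGTT_PTE_LM (requested via PTE_LM by intel_ggtt_bind_vma() for
 * lmem-backed vma resources).
 */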
250 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
255 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
258 enum i915_cache_level level,
261 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
262 gen8_pte_t __iomem *pte =
263 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
265 gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
267 ggtt->invalidate(ggtt);
270 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
271 struct i915_vma_resource *vma_res,
272 enum i915_cache_level level,
275 const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
276 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
277 gen8_pte_t __iomem *gte;
278 gen8_pte_t __iomem *end;
279 struct sgt_iter iter;
283 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
284 * not to allow the user to override access to a read only page.
287 gte = (gen8_pte_t __iomem *)ggtt->gsm;
288 gte += vma_res->start / I915_GTT_PAGE_SIZE;
289 end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
291 for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
292 gen8_set_pte(gte++, pte_encode | addr);
293 GEM_BUG_ON(gte > end);
295 /* Fill the allocated but "unused" space beyond the end of the buffer */
297 gen8_set_pte(gte++, vm->scratch[0]->encode);
300 * We want to flush the TLBs only after we're certain all the PTE
301 * updates have finished.
303 ggtt->invalidate(ggtt);
306 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
309 enum i915_cache_level level,
312 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
313 gen6_pte_t __iomem *pte =
314 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
316 iowrite32(vm->pte_encode(addr, level, flags), pte);
318 ggtt->invalidate(ggtt);
322 * Binds an object into the global gtt with the specified cache level.
323 * The object will be accessible to the GPU via commands whose operands
324 * reference offsets within the global GTT as well as accessible by the GPU
325 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
327 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
328 struct i915_vma_resource *vma_res,
329 enum i915_cache_level level,
332 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
333 gen6_pte_t __iomem *gte;
334 gen6_pte_t __iomem *end;
335 struct sgt_iter iter;
338 gte = (gen6_pte_t __iomem *)ggtt->gsm;
339 gte += vma_res->start / I915_GTT_PAGE_SIZE;
340 end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
342 for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
343 iowrite32(vm->pte_encode(addr, level, flags), gte++);
344 GEM_BUG_ON(gte > end);
346 /* Fill the allocated but "unused" space beyond the end of the buffer */
348 iowrite32(vm->scratch[0]->encode, gte++);
351 * We want to flush the TLBs only after we're certain all the PTE
352 * updates have finished.
354 ggtt->invalidate(ggtt);
357 static void nop_clear_range(struct i915_address_space *vm,
358 u64 start, u64 length)
362 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
363 u64 start, u64 length)
365 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
366 unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
367 unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
368 const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
369 gen8_pte_t __iomem *gtt_base =
370 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
371 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
374 if (WARN(num_entries > max_entries,
375 "First entry = %d; Num entries = %d (max=%d)\n",
376 first_entry, num_entries, max_entries))
377 num_entries = max_entries;
379 for (i = 0; i < num_entries; i++)
380 gen8_set_pte(&gtt_base[i], scratch_pte);
383 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
386 * Make sure the internal GAM fifo has been cleared of all GTT
387 * writes before exiting stop_machine(). This guarantees that
388 * any aperture accesses waiting to start in another process
389 * cannot back up behind the GTT writes causing a hang.
390 * The register can be any arbitrary GAM register.
392 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
396 struct i915_address_space *vm;
399 enum i915_cache_level level;
402 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
404 struct insert_page *arg = _arg;
406 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
407 bxt_vtd_ggtt_wa(arg->vm);
412 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
415 enum i915_cache_level level,
418 struct insert_page arg = { vm, addr, offset, level };
420 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
423 struct insert_entries {
424 struct i915_address_space *vm;
425 struct i915_vma_resource *vma_res;
426 enum i915_cache_level level;
430 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
432 struct insert_entries *arg = _arg;
434 gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
435 bxt_vtd_ggtt_wa(arg->vm);
440 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
441 struct i915_vma_resource *vma_res,
442 enum i915_cache_level level,
445 struct insert_entries arg = { vm, vma_res, level, flags };
447 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
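/*
 * stop_machine() executes the callback with every other CPU spinning with
 * interrupts disabled, so no aperture access can overlap the PTE writes and
 * the GAM fifo drain in bxt_vtd_ggtt_wa(). It is expensive, which is why
 * these wrappers are only installed when intel_vm_no_concurrent_access_wa()
 * says the workaround is actually needed.
 */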
450 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
451 u64 start, u64 length)
453 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
454 unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
455 unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
456 gen6_pte_t scratch_pte, __iomem *gtt_base =
457 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
458 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
461 if (WARN(num_entries > max_entries,
462 "First entry = %d; Num entries = %d (max=%d)\n",
463 first_entry, num_entries, max_entries))
464 num_entries = max_entries;
466 scratch_pte = vm->scratch[0]->encode;
467 for (i = 0; i < num_entries; i++)
468 iowrite32(scratch_pte, &gtt_base[i]);
471 void intel_ggtt_bind_vma(struct i915_address_space *vm,
472 struct i915_vm_pt_stash *stash,
473 struct i915_vma_resource *vma_res,
474 enum i915_cache_level cache_level,
479 if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
482 vma_res->bound_flags |= flags;
484 /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
486 if (vma_res->bi.readonly)
487 pte_flags |= PTE_READ_ONLY;
488 if (vma_res->bi.lmem)
491 vm->insert_entries(vm, vma_res, cache_level, pte_flags);
492 vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
495 void intel_ggtt_unbind_vma(struct i915_address_space *vm,
496 struct i915_vma_resource *vma_res)
498 vm->clear_range(vm, vma_res->start, vma_res->vma_size);
501 static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
506 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
509 GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
510 size = ggtt->vm.total - GUC_GGTT_TOP;
512 ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
513 GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
516 drm_dbg(&ggtt->vm.i915->drm,
517 "Failed to reserve top of GGTT for GuC\n");
522 static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
524 if (drm_mm_node_allocated(&ggtt->uc_fw))
525 drm_mm_remove_node(&ggtt->uc_fw);
528 static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
530 ggtt_release_guc_top(ggtt);
531 if (drm_mm_node_allocated(&ggtt->error_capture))
532 drm_mm_remove_node(&ggtt->error_capture);
533 mutex_destroy(&ggtt->error_mutex);
536 static int init_ggtt(struct i915_ggtt *ggtt)
539 * Let GEM Manage all of the aperture.
541 * However, leave one page at the end still bound to the scratch page.
542 * There are a number of places where the hardware apparently prefetches
543 * past the end of the object, and we've seen multiple hangs with the
544 * GPU head pointer stuck in a batchbuffer bound at the last page of the
545 * aperture. One page should be enough to keep any prefetching inside
548 unsigned long hole_start, hole_end;
549 struct drm_mm_node *entry;
552 ggtt->pte_lost = true;
555 * GuC requires all resources that we're sharing with it to be placed in
556 * non-WOPCM memory. If GuC is not present or not in use we still need a
557 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
560 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
561 intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
563 ret = intel_vgt_balloon(ggtt);
567 mutex_init(&ggtt->error_mutex);
568 if (ggtt->mappable_end) {
570 * Reserve a mappable slot for our lockless error capture.
572 * We strongly prefer taking address 0x0 in order to protect
573 * other critical buffers against accidental overwrites,
574 * as writing to address 0 is a very common mistake.
576 * Since 0 may already be in use by the system (e.g. the BIOS
577 * framebuffer), we let the reservation fail quietly and hope
578 * 0 remains reserved always.
580 * If we fail to reserve 0, and then fail to find any space
581 * for an error-capture, remain silent. We can afford not
582 * to reserve an error_capture node as we have fallback
583 * paths, and we trust that 0 will remain reserved. However,
584 * the only likely reason for failure to insert is a driver
585 * bug, which we expect to cause other failures...
587 ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
588 ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
589 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
590 drm_mm_insert_node_in_range(&ggtt->vm.mm,
591 &ggtt->error_capture,
592 ggtt->error_capture.size, 0,
593 ggtt->error_capture.color,
594 0, ggtt->mappable_end,
597 if (drm_mm_node_allocated(&ggtt->error_capture))
598 drm_dbg(&ggtt->vm.i915->drm,
599 "Reserved GGTT:[%llx, %llx] for use by error capture\n",
600 ggtt->error_capture.start,
601 ggtt->error_capture.start + ggtt->error_capture.size);
604 * The upper portion of the GuC address space has a sizeable hole
605 * (several MB) that is inaccessible by GuC. Reserve this range within
606 * GGTT as it can comfortably hold GuC/HuC firmware images.
608 ret = ggtt_reserve_guc_top(ggtt);
612 /* Clear any non-preallocated blocks */
613 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
614 drm_dbg(&ggtt->vm.i915->drm,
615 "clearing unused GTT space: [%lx, %lx]\n",
616 hole_start, hole_end);
617 ggtt->vm.clear_range(&ggtt->vm, hole_start,
618 hole_end - hole_start);
621 /* And finally clear the reserved guard page */
622 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
627 cleanup_init_ggtt(ggtt);
631 static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
632 struct i915_vm_pt_stash *stash,
633 struct i915_vma_resource *vma_res,
634 enum i915_cache_level cache_level,
639 /* Currently applicable only to VLV */
641 if (vma_res->bi.readonly)
642 pte_flags |= PTE_READ_ONLY;
644 if (flags & I915_VMA_LOCAL_BIND)
645 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
646 stash, vma_res, cache_level, flags);
648 if (flags & I915_VMA_GLOBAL_BIND)
649 vm->insert_entries(vm, vma_res, cache_level, pte_flags);
651 vma_res->bound_flags |= flags;
654 static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
655 struct i915_vma_resource *vma_res)
657 if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
658 vm->clear_range(vm, vma_res->start, vma_res->vma_size);
660 if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
661 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
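/*
 * The aliasing PPGTT shadows the entire GGTT address range with a single set
 * of page tables: a vma bound with I915_VMA_LOCAL_BIND is inserted into the
 * alias at the same offset as its GGTT node, which is why the full
 * ggtt->vm.total range is preallocated below.
 */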
664 static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
666 struct i915_vm_pt_stash stash = {};
667 struct i915_ppgtt *ppgtt;
670 ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
672 return PTR_ERR(ppgtt);
674 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
679 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
683 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
684 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
685 i915_gem_object_unlock(ppgtt->vm.scratch[0]);
690 * Note we only pre-allocate as far as the end of the global
691 * GTT. On 48b / 4-level page-tables, the difference is very,
692 * very significant! We have to preallocate as GVT/vgpu does
693 * not like the page directory disappearing.
695 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
698 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
700 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
701 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
703 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
704 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
706 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
710 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
712 i915_vm_put(&ppgtt->vm);
716 static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
718 struct i915_ppgtt *ppgtt;
720 ppgtt = fetch_and_zero(&ggtt->alias);
724 i915_vm_put(&ppgtt->vm);
726 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
727 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
730 int i915_init_ggtt(struct drm_i915_private *i915)
734 ret = init_ggtt(to_gt(i915)->ggtt);
738 if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
739 ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
741 cleanup_init_ggtt(to_gt(i915)->ggtt);
747 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
749 struct i915_vma *vma, *vn;
751 flush_workqueue(ggtt->vm.i915->wq);
752 i915_gem_drain_freed_objects(ggtt->vm.i915);
754 mutex_lock(&ggtt->vm.mutex);
756 ggtt->vm.skip_pte_rewrite = true;
758 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
759 struct drm_i915_gem_object *obj = vma->obj;
762 trylock = i915_gem_object_trylock(obj, NULL);
765 WARN_ON(__i915_vma_unbind(vma));
767 i915_gem_object_unlock(obj);
770 if (drm_mm_node_allocated(&ggtt->error_capture))
771 drm_mm_remove_node(&ggtt->error_capture);
772 mutex_destroy(&ggtt->error_mutex);
774 ggtt_release_guc_top(ggtt);
775 intel_vgt_deballoon(ggtt);
777 ggtt->vm.cleanup(&ggtt->vm);
779 mutex_unlock(&ggtt->vm.mutex);
780 i915_address_space_fini(&ggtt->vm);
782 arch_phys_wc_del(ggtt->mtrr);
784 if (ggtt->iomap.size)
785 io_mapping_fini(&ggtt->iomap);
789 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
792 void i915_ggtt_driver_release(struct drm_i915_private *i915)
794 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
796 fini_aliasing_ppgtt(ggtt);
798 intel_ggtt_fini_fences(ggtt);
799 ggtt_cleanup_hw(ggtt);
803 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
804 * all free objects have been drained.
807 void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
809 struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
811 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
812 dma_resv_fini(&ggtt->vm._resv);
815 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
817 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
818 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
819 return snb_gmch_ctl << 20;
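/*
 * Worked example with a made-up register value: a GGMS field of 2 gives a
 * 2 << 20 == 2MiB GTT, i.e. 2MiB / 4 bytes per gen6 PTE == 512Ki entries,
 * which at 4KiB per entry is the 2GiB vm.total computed in gen6_gmch_probe().
 */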
822 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
824 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
825 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
827 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
830 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
831 if (bdw_gmch_ctl > 4)
835 return bdw_gmch_ctl << 20;
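/*
 * Worked example with a made-up register value: GGMS == 3 decodes to
 * 1 << 3 == 8MiB of PTEs, i.e. 8MiB / 8 bytes per gen8 PTE == 1Mi entries,
 * or a 4GiB GGTT; the clamp above keeps 32-bit builds at 4MiB of PTEs and
 * hence a 2GiB GGTT.
 */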
838 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
840 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
841 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
844 return 1 << (20 + gmch_ctrl);
849 static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
852 * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
853 * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
855 GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
856 return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
859 static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
861 return gen6_gttmmadr_size(i915) / 2;
864 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
866 struct drm_i915_private *i915 = ggtt->vm.i915;
867 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
868 phys_addr_t phys_addr;
872 GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
873 phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
876 * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
877 * will be dropped. For WC mappings in general we have 64 byte burst
878 * writes when the WC buffer is flushed, so we can't use it, but have to
879 * resort to an uncached mapping. The WC issue is easily caught by the
880 * readback check when writing GTT PTE entries.
882 if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
883 ggtt->gsm = ioremap(phys_addr, size);
885 ggtt->gsm = ioremap_wc(phys_addr, size);
887 drm_err(&i915->drm, "Failed to map the ggtt page table\n");
891 kref_init(&ggtt->vm.resv_ref);
892 ret = setup_scratch_page(&ggtt->vm);
894 drm_err(&i915->drm, "Scratch setup failed\n");
895 /* iounmap will also get called at remove, but meh */
901 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
904 ggtt->vm.scratch[0]->encode =
905 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
906 I915_CACHE_NONE, pte_flags);
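/*
 * This encoded scratch PTE is what clear_range and the "fill beyond the end
 * of the buffer" paths write, so any GGTT offset that does not back a live
 * object resolves to the single scratch page instead of stale or unmapped
 * memory.
 */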
911 static void gen6_gmch_remove(struct i915_address_space *vm)
913 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
919 static struct resource pci_resource(struct pci_dev *pdev, int bar)
921 return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
922 pci_resource_len(pdev, bar));
925 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
927 struct drm_i915_private *i915 = ggtt->vm.i915;
928 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
932 if (!HAS_LMEM(i915) && !HAS_BAR2_SMEM_STOLEN(i915)) {
933 ggtt->gmadr = pci_resource(pdev, 2);
934 ggtt->mappable_end = resource_size(&ggtt->gmadr);
937 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
938 if (IS_CHERRYVIEW(i915))
939 size = chv_get_total_gtt_size(snb_gmch_ctl);
941 size = gen8_get_total_gtt_size(snb_gmch_ctl);
943 ggtt->vm.alloc_pt_dma = alloc_pt_dma;
944 ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
945 ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
947 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
948 ggtt->vm.cleanup = gen6_gmch_remove;
949 ggtt->vm.insert_page = gen8_ggtt_insert_page;
950 ggtt->vm.clear_range = nop_clear_range;
951 if (intel_scanout_needs_vtd_wa(i915))
952 ggtt->vm.clear_range = gen8_ggtt_clear_range;
954 ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
957 * Serialize GTT updates with aperture access on BXT if VT-d is on,
960 if (intel_vm_no_concurrent_access_wa(i915)) {
961 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
962 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
965 * Calling stop_machine() version of GGTT update function
966 * at error capture/reset path will raise lockdep warning.
967 * Allow calling gen8_ggtt_insert_* directly at reset path
968 * which is safe from parallel GGTT updates.
970 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
971 ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
973 ggtt->vm.bind_async_flags =
974 I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
977 ggtt->invalidate = gen8_ggtt_invalidate;
979 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
980 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
982 ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
984 setup_private_pat(ggtt->vm.gt->uncore);
986 return ggtt_probe_common(ggtt, size);
989 static u64 snb_pte_encode(dma_addr_t addr,
990 enum i915_cache_level level,
993 gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
996 case I915_CACHE_L3_LLC:
998 pte |= GEN6_PTE_CACHE_LLC;
1000 case I915_CACHE_NONE:
1001 pte |= GEN6_PTE_UNCACHED;
1004 MISSING_CASE(level);
1010 static u64 ivb_pte_encode(dma_addr_t addr,
1011 enum i915_cache_level level,
1014 gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
1017 case I915_CACHE_L3_LLC:
1018 pte |= GEN7_PTE_CACHE_L3_LLC;
1020 case I915_CACHE_LLC:
1021 pte |= GEN6_PTE_CACHE_LLC;
1023 case I915_CACHE_NONE:
1024 pte |= GEN6_PTE_UNCACHED;
1027 MISSING_CASE(level);
1033 static u64 byt_pte_encode(dma_addr_t addr,
1034 enum i915_cache_level level,
1037 gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
1039 if (!(flags & PTE_READ_ONLY))
1040 pte |= BYT_PTE_WRITEABLE;
1042 if (level != I915_CACHE_NONE)
1043 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
1048 static u64 hsw_pte_encode(dma_addr_t addr,
1049 enum i915_cache_level level,
1052 gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
1054 if (level != I915_CACHE_NONE)
1055 pte |= HSW_WB_LLC_AGE3;
1060 static u64 iris_pte_encode(dma_addr_t addr,
1061 enum i915_cache_level level,
1064 gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
1067 case I915_CACHE_NONE:
1070 pte |= HSW_WT_ELLC_LLC_AGE3;
1073 pte |= HSW_WB_ELLC_LLC_AGE3;
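/*
 * Each of the *_pte_encode() variants above packs that platform's
 * cacheability control bits into a 32-bit PTE next to the address and the
 * valid bit; gen6_gmch_probe() selects the appropriate one per platform
 * further below.
 */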
1080 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
1082 struct drm_i915_private *i915 = ggtt->vm.i915;
1083 struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
1087 ggtt->gmadr = pci_resource(pdev, 2);
1088 ggtt->mappable_end = resource_size(&ggtt->gmadr);
1091 * 64/512MB is the current min/max we actually know of, but this is
1092 * just a coarse sanity check.
1094 if (ggtt->mappable_end < (64 << 20) ||
1095 ggtt->mappable_end > (512 << 20)) {
1096 drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
1097 &ggtt->mappable_end);
1101 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1103 size = gen6_get_total_gtt_size(snb_gmch_ctl);
1104 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
1106 ggtt->vm.alloc_pt_dma = alloc_pt_dma;
1107 ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
1109 ggtt->vm.clear_range = nop_clear_range;
1110 if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
1111 ggtt->vm.clear_range = gen6_ggtt_clear_range;
1112 ggtt->vm.insert_page = gen6_ggtt_insert_page;
1113 ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
1114 ggtt->vm.cleanup = gen6_gmch_remove;
1116 ggtt->invalidate = gen6_ggtt_invalidate;
1118 if (HAS_EDRAM(i915))
1119 ggtt->vm.pte_encode = iris_pte_encode;
1120 else if (IS_HASWELL(i915))
1121 ggtt->vm.pte_encode = hsw_pte_encode;
1122 else if (IS_VALLEYVIEW(i915))
1123 ggtt->vm.pte_encode = byt_pte_encode;
1124 else if (GRAPHICS_VER(i915) >= 7)
1125 ggtt->vm.pte_encode = ivb_pte_encode;
1127 ggtt->vm.pte_encode = snb_pte_encode;
1129 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
1130 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
1132 return ggtt_probe_common(ggtt, size);
1135 static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
1137 struct drm_i915_private *i915 = gt->i915;
1141 ggtt->vm.i915 = i915;
1142 ggtt->vm.dma = i915->drm.dev;
1143 dma_resv_init(&ggtt->vm._resv);
1145 if (GRAPHICS_VER(i915) >= 8)
1146 ret = gen8_gmch_probe(ggtt);
1147 else if (GRAPHICS_VER(i915) >= 6)
1148 ret = gen6_gmch_probe(ggtt);
1150 ret = intel_ggtt_gmch_probe(ggtt);
1153 dma_resv_fini(&ggtt->vm._resv);
1157 if ((ggtt->vm.total - 1) >> 32) {
1159 "We never expected a Global GTT with more than 32bits"
1160 " of address space! Found %lldM!\n",
1161 ggtt->vm.total >> 20);
1162 ggtt->vm.total = 1ULL << 32;
1163 ggtt->mappable_end =
1164 min_t(u64, ggtt->mappable_end, ggtt->vm.total);
1167 if (ggtt->mappable_end > ggtt->vm.total) {
1169 "mappable aperture extends past end of GGTT,"
1170 " aperture=%pa, total=%llx\n",
1171 &ggtt->mappable_end, ggtt->vm.total);
1172 ggtt->mappable_end = ggtt->vm.total;
1175 /* GMADR is the PCI mmio aperture into the global GTT. */
1176 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
1177 drm_dbg(&i915->drm, "GMADR size = %lluM\n",
1178 (u64)ggtt->mappable_end >> 20);
1179 drm_dbg(&i915->drm, "DSM size = %lluM\n",
1180 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
1186 * i915_ggtt_probe_hw - Probe GGTT hardware location
1187 * @i915: i915 device
1189 int i915_ggtt_probe_hw(struct drm_i915_private *i915)
1193 ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
1197 if (i915_vtd_active(i915))
1198 drm_info(&i915->drm, "VT-d active for gfx access\n");
1203 int i915_ggtt_enable_hw(struct drm_i915_private *i915)
1205 if (GRAPHICS_VER(i915) < 6)
1206 return intel_ggtt_gmch_enable_hw(i915);
1211 void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
1213 GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
1215 ggtt->invalidate = guc_ggtt_invalidate;
1217 ggtt->invalidate(ggtt);
1220 void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
1222 /* XXX Temporary pardon for error unload */
1223 if (ggtt->invalidate == gen8_ggtt_invalidate)
1226 /* We should only be called after i915_ggtt_enable_guc() */
1227 GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
1229 ggtt->invalidate = gen8_ggtt_invalidate;
1231 ggtt->invalidate(ggtt);
1235 * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
1236 * @vm: The VM to restore the mappings for
1238 * Restore the memory mappings for all objects mapped to HW via the GGTT or a
1241 * Returns %true if restoring the mapping for any object that was in a write
1242 * domain before suspend.
1244 bool i915_ggtt_resume_vm(struct i915_address_space *vm)
1246 struct i915_vma *vma;
1247 bool write_domain_objs = false;
1250 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
1253 * First fill our portion of the GTT with scratch pages if
1254 * they were not retained across suspend.
1256 retained_ptes = suspend_retains_ptes(vm) &&
1257 !i915_vm_to_ggtt(vm)->pte_lost &&
1258 !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm));
1261 vm->clear_range(vm, 0, vm->total);
1263 /* clflush objects bound into the GGTT and rebind them. */
1264 list_for_each_entry(vma, &vm->bound_list, vm_link) {
1265 struct drm_i915_gem_object *obj = vma->obj;
1266 unsigned int was_bound =
1267 atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
1269 GEM_BUG_ON(!was_bound);
1271 vma->ops->bind_vma(vm, NULL, vma->resource,
1272 obj ? obj->cache_level : 0,
1274 if (obj) { /* only used during resume => exclusive access */
1275 write_domain_objs |= fetch_and_zero(&obj->write_domain);
1276 obj->read_domains |= I915_GEM_DOMAIN_GTT;
1280 return write_domain_objs;
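/*
 * i915_ggtt_resume() uses this return value: if any object still had a CPU
 * write domain when it was rebound, it follows the GGTT invalidate with
 * wbinvd_on_all_cpus() so those dirty cachelines are written back.
 */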
1283 void i915_ggtt_resume(struct i915_ggtt *ggtt)
1287 intel_gt_check_and_clear_faults(ggtt->vm.gt);
1289 flush = i915_ggtt_resume_vm(&ggtt->vm);
1291 ggtt->invalidate(ggtt);
1294 wbinvd_on_all_cpus();
1296 if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
1297 setup_private_pat(ggtt->vm.gt->uncore);
1299 intel_ggtt_restore_fences(ggtt);
1302 void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val)
1304 to_gt(i915)->ggtt->pte_lost = val;