// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"

#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"
#include "gen8_ppgtt.h"

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

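/*
 * For example, if a node ends at 0x20000 and the following node has a
 * different colour, the adjusted range becomes [start, 0x1f000), i.e.
 * the last I915_GTT_PAGE_SIZE page is left unallocated and serves as a
 * guard page against CS prefetch running past the object.
 */
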
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(to_gt(i915)->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!intel_vtd_active(i915))
		return false;

	if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
		return true;

	if (GRAPHICS_VER(i915) == 12)
		return true; /* XXX DMAR fault reason 7 */

	return false;
}

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;
	int open;

	mutex_lock(&ggtt->vm.mutex);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
			continue;

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			__i915_vma_evict(vma);
			drm_mm_remove_node(&vma->node);
		}
	}

	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
	ggtt->invalidate(ggtt);
	atomic_set(&ggtt->vm.open, open);

	mutex_unlock(&ggtt->vm.mutex);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	if (GRAPHICS_VER(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}

u64 gen8_ggtt_pte_encode(dma_addr_t addr,
			 enum i915_cache_level level,
			 u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	return pte;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

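/*
 * Rough sketch of what the helpers above produce for a page of device
 * local memory on gen12+ (illustrative values only):
 *
 *	pte = addr | GEN8_PAGE_PRESENT | GEN12_GGTT_PTE_LM;
 *	gen8_set_pte((gen8_pte_t __iomem *)ggtt->gsm +
 *		     offset / I915_GTT_PAGE_SIZE, pte);
 *
 * i.e. the GGTT behind the GSM is a flat array of 64-bit PTEs, one per
 * I915_GTT_PAGE_SIZE page of address space.
 */
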
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

	ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

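/*
 * Note that vma->node.size may be larger than the backing store (e.g.
 * due to alignment or colouring), which is why the tail of the node is
 * padded with the scratch PTE above rather than left holding stale
 * entries.
 */
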
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}

struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
	u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0]->encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
				    flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

static void ggtt_bind_vma(struct i915_address_space *vm,
			  struct i915_vm_pt_stash *stash,
			  struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}

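/*
 * Illustrative flag derivation: binding a read-only object that lives
 * in local memory ends up as
 *
 *	vm->insert_entries(vm, vma, cache_level, PTE_READ_ONLY | PTE_LM);
 *
 * where PTE_READ_ONLY only takes effect on VLV (see has_read_only) and
 * PTE_LM only matters on local-memory capable platforms.
 */
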
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}

static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}

static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
	ggtt_release_guc_top(ggtt);
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);
}

static int init_ggtt(struct i915_ggtt *ggtt)
{
	/*
	 * Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	mutex_init(&ggtt->error_mutex);
	if (ggtt->mappable_end) {
		/*
		 * Reserve a mappable slot for our lockless error capture.
		 *
		 * We strongly prefer taking address 0x0 in order to protect
		 * other critical buffers against accidental overwrites,
		 * as writing to address 0 is a very common mistake.
		 *
		 * Since 0 may already be in use by the system (e.g. the BIOS
		 * framebuffer), we let the reservation fail quietly and hope
		 * 0 remains reserved always.
		 *
		 * If we fail to reserve 0, and then fail to find any space
		 * for an error-capture, remain silent. We can afford not
		 * to reserve an error_capture node as we have fallback
		 * paths, and we trust that 0 will remain reserved. However,
		 * the only likely reason for failure to insert is a driver
		 * bug, which we expect to cause other failures...
		 */
		ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
		ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
		if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
			drm_mm_insert_node_in_range(&ggtt->vm.mm,
						    &ggtt->error_capture,
						    ggtt->error_capture.size, 0,
						    ggtt->error_capture.color,
						    0, ggtt->mappable_end,
						    DRM_MM_INSERT_LOW);
	}
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_dbg(&ggtt->vm.i915->drm,
			"Reserved GGTT:[%llx, %llx] for use by error capture\n",
			ggtt->error_capture.start,
			ggtt->error_capture.start + ggtt->error_capture.size);

	/*
	 * The upper portion of the GuC address space has a sizeable hole
	 * (several MB) that is inaccessible by GuC. Reserve this range within
	 * GGTT as it can comfortably hold GuC/HuC firmware images.
	 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg(&ggtt->vm.i915->drm,
			"clearing unused GTT space: [%lx, %lx]\n",
			hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}

static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma *vma,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma, cache_level, flags);

	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma, cache_level, pte_flags);
}

static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		vm->clear_range(vm, vma->node.start, vma->size);

	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}

638 static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
640 struct i915_vm_pt_stash stash = {};
641 struct i915_ppgtt *ppgtt;
644 ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
646 return PTR_ERR(ppgtt);
648 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
653 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
657 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
658 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
659 i915_gem_object_unlock(ppgtt->vm.scratch[0]);
664 * Note we only pre-allocate as far as the end of the global
665 * GTT. On 48b / 4-level page-tables, the difference is very,
666 * very significant! We have to preallocate as GVT/vgpu does
667 * not like the page directory disappearing.
669 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
672 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
674 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
675 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
677 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
678 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
680 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
684 i915_vm_free_pt_stash(&ppgtt->vm, &stash);
686 i915_vm_put(&ppgtt->vm);
690 static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
692 struct i915_ppgtt *ppgtt;
694 ppgtt = fetch_and_zero(&ggtt->alias);
698 i915_vm_put(&ppgtt->vm);
700 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
701 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(to_gt(i915)->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
		if (ret)
			cleanup_init_ggtt(to_gt(i915)->ggtt);
	}

	return 0;
}

721 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
723 struct i915_vma *vma, *vn;
725 atomic_set(&ggtt->vm.open, 0);
727 flush_workqueue(ggtt->vm.i915->wq);
729 mutex_lock(&ggtt->vm.mutex);
731 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
732 WARN_ON(__i915_vma_unbind(vma));
734 if (drm_mm_node_allocated(&ggtt->error_capture))
735 drm_mm_remove_node(&ggtt->error_capture);
736 mutex_destroy(&ggtt->error_mutex);
738 ggtt_release_guc_top(ggtt);
739 intel_vgt_deballoon(ggtt);
741 ggtt->vm.cleanup(&ggtt->vm);
743 mutex_unlock(&ggtt->vm.mutex);
744 i915_address_space_fini(&ggtt->vm);
746 arch_phys_wc_del(ggtt->mtrr);
748 if (ggtt->iomap.size)
749 io_mapping_fini(&ggtt->iomap);
/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}

/**
 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
 * all free objects have been drained.
 * @i915: i915 device
 */
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
	dma_resv_fini(&ggtt->vm._resv);
}

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

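/*
 * Worked example: a BDW-style GGMS field of 2 decodes as 1 << 2 = 4,
 * i.e. 4 << 20 = 4 MiB of PTEs. With 8-byte gen8 PTEs that is
 * 4 MiB / 8 * I915_GTT_PAGE_SIZE = 2 GiB of GGTT address space (see the
 * vm.total calculation in gen8_gmch_probe()).
 */
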
static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
{
	/*
	 * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
	 * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
	 */
	GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
	return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
}

static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
{
	return gen6_gttmmadr_size(i915) / 2;
}

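/*
 * e.g. on gen6 the 4 MiB GTTMMADR BAR is split in half: registers in
 * the lower 2 MiB and the GTT entries in the upper 2 MiB, hence
 * gen6_gttadr_offset() returning half of gen6_gttmmadr_size(); gen8+
 * uses the same split on a 16 MiB BAR.
 */
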
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
	phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);

	/*
	 * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	kref_init(&ggtt->vm.resv_ref);
	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;

	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, pte_flags);

	return 0;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!HAS_LMEM(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
	ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/*
	 * Serialize GTT updates with aperture access on BXT if VT-d is on,
	 * and always on CHV.
	 */
	if (intel_vm_no_concurrent_access_wa(i915)) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}

static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

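/*
 * Rough summary of the gen6/7 encoders above (see each function for the
 * authoritative bits):
 *
 *	snb:  LLC/L3_LLC -> GEN6_PTE_CACHE_LLC, NONE -> GEN6_PTE_UNCACHED
 *	ivb:  L3_LLC -> GEN7_PTE_CACHE_L3_LLC, LLC -> GEN6_PTE_CACHE_LLC
 *	byt:  snooped when cached, writeable unless PTE_READ_ONLY
 *	hsw:  any cached level -> HSW_WB_LLC_AGE3
 *	iris: WT -> HSW_WT_ELLC_LLC_AGE3, other cached -> HSW_WB_ELLC_LLC_AGE3
 */
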
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (GRAPHICS_VER(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;

	return ggtt_probe_common(ggtt, size);
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = i915->drm.dev;
	dma_resv_init(&ggtt->vm._resv);

	if (GRAPHICS_VER(i915) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (GRAPHICS_VER(i915) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret) {
		dma_resv_fini(&ggtt->vm._resv);
		return ret;
	}

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
		(u64)ggtt->mappable_end >> 20);
	drm_dbg(&i915->drm, "DSM size = %lluM\n",
		(u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
	if (ret)
		return ret;

	if (intel_vtd_active(i915))
		drm_info(&i915->drm, "VT-d active for gfx access\n");

	return 0;
}

int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;
	bool flush = false;
	int open;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
				   obj ? obj->cache_level : 0,
				   was_bound);
		if (obj) { /* only used during resume => exclusive access */
			flush |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	atomic_set(&ggtt->vm.open, open);
	ggtt->invalidate(ggtt);

	if (flush)
		wbinvd_on_all_cpus();

	if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);

	intel_ggtt_restore_fences(ggtt);
}