drm/i915/gt: Remove local entries from GGTT on suspend
author		Chris Wilson <chris@chris-wilson.co.uk>	Thu, 28 May 2020 08:24:27 +0000 (09:24 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>	Thu, 28 May 2020 15:55:15 +0000 (16:55 +0100)
Across suspend/resume, we clear the entire GGTT and rebuild it from
scratch. In particular, we want to preserve only the global entries for
use by the HW, and to delay reinstating the local binds until required
by the user. This means that we can evict any local binds from the
global GTT, saving the time otherwise spent preserving their state, as
they will be rebound on demand.
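
For reference, a condensed sketch of the new suspend-time walk (the
locking and the vm.open save/restore are elided here; the complete
version is in the intel_ggtt.c hunk below):

	/* Under ggtt->vm.mutex: keep global binds, evict local-only ones. */
	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
			continue; /* still in use, must survive suspend */

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			__i915_vma_evict(vma);          /* teardown; PTE rewrite skipped */
			drm_mm_remove_node(&vma->node); /* release the GGTT range */
		}
	}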

References: https://gitlab.freedesktop.org/drm/intel/-/issues/1947
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200528082427.21402-2-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h

diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index ffe285b0b3bd46d7f3709c0db5cb97adf7128e3d..323c328d444a64866c2a11abc75f285a546de5be 100644
@@ -108,13 +108,32 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
 
 void i915_ggtt_suspend(struct i915_ggtt *ggtt)
 {
-       struct i915_vma *vma;
+       struct i915_vma *vma, *vn;
+       int open;
+
+       mutex_lock(&ggtt->vm.mutex);
+
+       /* Skip rewriting PTE on VMA unbind. */
+       open = atomic_xchg(&ggtt->vm.open, 0);
 
-       list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+       list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+               GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                i915_vma_wait_for_bind(vma);
 
+               if (i915_vma_is_pinned(vma))
+                       continue;
+
+               if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
+                       __i915_vma_evict(vma);
+                       drm_mm_remove_node(&vma->node);
+               }
+       }
+
        ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
        ggtt->invalidate(ggtt);
+       atomic_set(&ggtt->vm.open, open);
+
+       mutex_unlock(&ggtt->vm.mutex);
 
        intel_gt_check_and_clear_faults(ggtt->vm.gt);
 }
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 22198b758459b40d1639e7245532c972cea9fa86..9b30ddc49e4bcac8960267e3e99b2635a57bffed 100644
@@ -1229,31 +1229,9 @@ int i915_vma_move_to_active(struct i915_vma *vma,
        return 0;
 }
 
-int __i915_vma_unbind(struct i915_vma *vma)
+void __i915_vma_evict(struct i915_vma *vma)
 {
-       int ret;
-
-       lockdep_assert_held(&vma->vm->mutex);
-
-       if (i915_vma_is_pinned(vma)) {
-               vma_print_allocator(vma, "is pinned");
-               return -EAGAIN;
-       }
-
-       /*
-        * After confirming that no one else is pinning this vma, wait for
-        * any laggards who may have crept in during the wait (through
-        * a residual pin skipping the vm->mutex) to complete.
-        */
-       ret = i915_vma_sync(vma);
-       if (ret)
-               return ret;
-
-       if (!drm_mm_node_allocated(&vma->node))
-               return 0;
-
        GEM_BUG_ON(i915_vma_is_pinned(vma));
-       GEM_BUG_ON(i915_vma_is_active(vma));
 
        if (i915_vma_is_map_and_fenceable(vma)) {
                /* Force a pagefault for domain tracking on next user access */
@@ -1292,6 +1270,33 @@ int __i915_vma_unbind(struct i915_vma *vma)
 
        i915_vma_detach(vma);
        vma_unbind_pages(vma);
+}
+
+int __i915_vma_unbind(struct i915_vma *vma)
+{
+       int ret;
+
+       lockdep_assert_held(&vma->vm->mutex);
+
+       if (!drm_mm_node_allocated(&vma->node))
+               return 0;
+
+       if (i915_vma_is_pinned(vma)) {
+               vma_print_allocator(vma, "is pinned");
+               return -EAGAIN;
+       }
+
+       /*
+        * After confirming that no one else is pinning this vma, wait for
+        * any laggards who may have crept in during the wait (through
+        * a residual pin skipping the vm->mutex) to complete.
+        */
+       ret = i915_vma_sync(vma);
+       if (ret)
+               return ret;
+
+       GEM_BUG_ON(i915_vma_is_active(vma));
+       __i915_vma_evict(vma);
 
        drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
        return 0;
@@ -1303,13 +1308,13 @@ int i915_vma_unbind(struct i915_vma *vma)
        intel_wakeref_t wakeref = 0;
        int err;
 
-       if (!drm_mm_node_allocated(&vma->node))
-               return 0;
-
        /* Optimistic wait before taking the mutex */
        err = i915_vma_sync(vma);
        if (err)
-               goto out_rpm;
+               return err;
+
+       if (!drm_mm_node_allocated(&vma->node))
+               return 0;
 
        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8ad1daabcd58bbb1ec37be60b3a207f5df223104..d0d01f9095486aa67a19afe761e39d94df1004f0 100644
@@ -203,6 +203,7 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 void i915_vma_revoke_mmap(struct i915_vma *vma);
+void __i915_vma_evict(struct i915_vma *vma);
 int __i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 void i915_vma_unlink_ctx(struct i915_vma *vma);