drm/i915: Add locking to i915_gem_evict_vm(), v3.
author	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
	Mon, 17 Jan 2022 07:56:04 +0000 (08:56 +0100)
committer	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
	Tue, 18 Jan 2022 11:00:30 +0000 (12:00 +0100)
i915_gem_evict_vm will need to be able to evict objects that are
locked by the current ww context. By testing whether the current
context already holds an object's lock, we can handle those objects
correctly as well. This allows us to evict the entire vm even if we
already hold some objects' locks.
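
The check itself is visible in the i915_gem_evict_vm() hunk below. As a
rough sketch of the idea (the helper name evict_without_trylock is purely
illustrative and not part of this patch):

  #include <linux/dma-resv.h>   /* dma_resv_locking_ctx() */
  #include <linux/kref.h>       /* kref_read() */

  /*
   * A vma whose object is already dead (refcount dropped to zero), or
   * whose reservation lock is held by the caller's ww context, cannot
   * be trylocked again; such vmas go on a separate eviction list and
   * are unbound without taking (or dropping) the object lock.
   */
  static bool evict_without_trylock(struct i915_vma *vma,
                                    struct i915_gem_ww_ctx *ww)
  {
          struct dma_resv *resv = vma->obj->base.resv;

          if (!kref_read(&vma->obj->base.refcount))
                  return true;

          return ww && dma_resv_locking_ctx(resv) == &ww->ctx;
  }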

Previously, this was spread over several commits, but it makes
more sense to commit the changes to i915_gem_evict_vm separately
from the changes to i915_gem_evict_something() and
i915_gem_evict_for_node().

Changes since v1:
- Handle evicting dead objects better.
Changes since v2:
- Use for_i915_gem_ww in igt_evict_vm. (Thomas)
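
For context, for_i915_gem_ww() is i915's ww transaction helper: roughly,
it initialises the ww context, runs the loop body, backs off and retries
on -EDEADLK, and leaves any other error in err. The selftest hunk at the
end of this patch uses it like so (shown here only as a usage sketch; the
third argument selects interruptible waits, false in this case):

  struct i915_gem_ww_ctx ww;
  int err;

  for_i915_gem_ww(&ww, err, false) {
          mutex_lock(&ggtt->vm.mutex);
          err = i915_gem_evict_vm(&ggtt->vm, &ww);
          mutex_unlock(&ggtt->vm.mutex);
  }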

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
[mlankhorst: Fix up doc warning.]
Link: https://patchwork.freedesktop.org/patch/msgid/20220117075604.131477-1-maarten.lankhorst@linux.intel.com
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 2065e5e44fac1e93cb73d278b28861034ce4e98f..97b9fd7afbef2191b2ab9db5e4b819711a834ad9 100644 (file)
@@ -754,7 +754,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
                case 1:
                        /* Too fragmented, unbind everything and retry */
                        mutex_lock(&eb->context->vm->mutex);
-                       err = i915_gem_evict_vm(eb->context->vm);
+                       err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
                        mutex_unlock(&eb->context->vm->mutex);
                        if (err)
                                return err;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index fafd158e531384b292b66cef307be80234146abc..4afad1604a6ac86afa803ca1741087eda76309db 100644 (file)
@@ -21,7 +21,6 @@
 #include "i915_trace.h"
 #include "i915_user_extensions.h"
 #include "i915_gem_ttm.h"
-#include "i915_gem_evict.h"
 #include "i915_vma.h"
 
 static inline bool
@@ -367,7 +366,7 @@ retry:
                if (vma == ERR_PTR(-ENOSPC)) {
                        ret = mutex_lock_interruptible(&ggtt->vm.mutex);
                        if (!ret) {
-                               ret = i915_gem_evict_vm(&ggtt->vm);
+                               ret = i915_gem_evict_vm(&ggtt->vm, &ww);
                                mutex_unlock(&ggtt->vm.mutex);
                        }
                        if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b61a1c7857397cf9b19fa37c4d43499741eee76a..deb7b82a7cf19908343c77ee9dff1bbd57a1ca44 100644 (file)
@@ -1742,7 +1742,8 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
                                         struct drm_mm_node *node,
                                         unsigned int flags);
-int i915_gem_evict_vm(struct i915_address_space *vm);
+int i915_gem_evict_vm(struct i915_address_space *vm,
+                     struct i915_gem_ww_ctx *ww);
 
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2b73ddb11c66fc034b8e6e5425324fca0e216bee..670dceaa9b247235b1e951d89b2b756014da24e8 100644 (file)
@@ -358,6 +358,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
 /**
  * i915_gem_evict_vm - Evict all idle vmas from a vm
  * @vm: Address space to cleanse
+ * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
+ * will be able to evict vmas locked by that ww context as well.
  *
  * This function evicts all vmas from a vm.
  *
@@ -367,7 +369,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_vm(struct i915_address_space *vm)
+int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
 {
        int ret = 0;
 
@@ -388,24 +390,52 @@ int i915_gem_evict_vm(struct i915_address_space *vm)
        do {
                struct i915_vma *vma, *vn;
                LIST_HEAD(eviction_list);
+               LIST_HEAD(locked_eviction_list);
 
                list_for_each_entry(vma, &vm->bound_list, vm_link) {
                        if (i915_vma_is_pinned(vma))
                                continue;
 
+                       /*
+                        * If we already own the lock, trylock fails. In case
+                        * the resv is shared among multiple objects, we still
+                        * need the object ref.
+                        */
+                       if (!kref_read(&vma->obj->base.refcount) ||
+                           (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
+                               __i915_vma_pin(vma);
+                               list_add(&vma->evict_link, &locked_eviction_list);
+                               continue;
+                       }
+
+                       if (!i915_gem_object_trylock(vma->obj, ww))
+                               continue;
+
                        __i915_vma_pin(vma);
                        list_add(&vma->evict_link, &eviction_list);
                }
-               if (list_empty(&eviction_list))
+               if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
                        break;
 
                ret = 0;
+               /* Unbind locked objects first, before unlocking the eviction_list */
+               list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
+                       __i915_vma_unpin(vma);
+
+                       if (ret == 0)
+                               ret = __i915_vma_unbind(vma);
+                       if (ret != -EINTR) /* "Get me out of here!" */
+                               ret = 0;
+               }
+
                list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
                        __i915_vma_unpin(vma);
                        if (ret == 0)
                                ret = __i915_vma_unbind(vma);
                        if (ret != -EINTR) /* "Get me out of here!" */
                                ret = 0;
+
+                       i915_gem_object_unlock(vma->obj);
                }
        } while (ret == 0);
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9d859b0a3fbe101495a3b29cf9317307e5c3c7f0..8bbc08a0e88f4b758e82fde93d972aeff636ab4e 100644 (file)
@@ -1479,7 +1479,12 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
                /* Unlike i915_vma_pin, we don't take no for an answer! */
                flush_idle_contexts(vm->gt);
                if (mutex_lock_interruptible(&vm->mutex) == 0) {
-                       i915_gem_evict_vm(vm);
+                       /*
+                        * We pass NULL ww here, as we don't want to unbind
+                        * locked objects when called from execbuf when pinning
+                        * is removed. This would probably regress badly.
+                        */
+                       i915_gem_evict_vm(vm, NULL);
                        mutex_unlock(&vm->mutex);
                }
        } while (1);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 75b709c26dd3c7c1803275e9f22ebb7c9aba88a5..19a348546b739c8d528eea0c1c000d0011d0e635 100644 (file)
@@ -331,6 +331,7 @@ static int igt_evict_vm(void *arg)
 {
        struct intel_gt *gt = arg;
        struct i915_ggtt *ggtt = gt->ggtt;
+       struct i915_gem_ww_ctx ww;
        LIST_HEAD(objects);
        int err;
 
@@ -342,7 +343,7 @@ static int igt_evict_vm(void *arg)
 
        /* Everything is pinned, nothing should happen */
        mutex_lock(&ggtt->vm.mutex);
-       err = i915_gem_evict_vm(&ggtt->vm);
+       err = i915_gem_evict_vm(&ggtt->vm, NULL);
        mutex_unlock(&ggtt->vm.mutex);
        if (err) {
                pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
@@ -352,9 +353,12 @@ static int igt_evict_vm(void *arg)
 
        unpin_ggtt(ggtt);
 
-       mutex_lock(&ggtt->vm.mutex);
-       err = i915_gem_evict_vm(&ggtt->vm);
-       mutex_unlock(&ggtt->vm.mutex);
+       for_i915_gem_ww(&ww, err, false) {
+               mutex_lock(&ggtt->vm.mutex);
+               err = i915_gem_evict_vm(&ggtt->vm, &ww);
+               mutex_unlock(&ggtt->vm.mutex);
+       }
+
        if (err) {
                pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
                       err);