gpuvm->rb.tree = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpuvm->rb.list);
+ kref_init(&gpuvm->kref);
+
gpuvm->name = name ? name : "unknown";
gpuvm->flags = flags;
gpuvm->ops = ops;
}
EXPORT_SYMBOL_GPL(drm_gpuvm_init);
-/**
- * drm_gpuvm_destroy() - cleanup a &drm_gpuvm
- * @gpuvm: pointer to the &drm_gpuvm to clean up
- *
- * Note that it is a bug to call this function on a manager that still
- * holds GPU VA mappings.
- */
-void
-drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
+static void
+drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
{
gpuvm->name = NULL;
drm_gem_object_put(gpuvm->r_obj);
}
-EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
+
+static void
+drm_gpuvm_free(struct kref *kref)
+{
+ struct drm_gpuvm *gpuvm = container_of(kref, struct drm_gpuvm, kref);
+
+ drm_gpuvm_fini(gpuvm);
+
+ if (drm_WARN_ON(gpuvm->drm, !gpuvm->ops->vm_free))
+ return;
+
+ gpuvm->ops->vm_free(gpuvm);
+}
+
+/**
+ * drm_gpuvm_put() - drop a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to release the reference of
+ *
+ * This releases a reference to @gpuvm.
+ *
+ * This function may be called from atomic context.
+ */
+void
+drm_gpuvm_put(struct drm_gpuvm *gpuvm)
+{
+ if (gpuvm)
+ kref_put(&gpuvm->kref, drm_gpuvm_free);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_put);
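For illustration, a minimal driver-side sketch of the resulting lifecycle; the my_* names are hypothetical and not part of this patch. drm_gpuvm_init() starts the reference count at one via the kref_init() added above, and the mandatory vm_free callback frees the embedding structure once the last drm_gpuvm_put() drops the count to zero:

	#include <linux/slab.h>
	#include <drm/drm_gpuvm.h>

	struct my_vm {
		struct drm_gpuvm base;
		/* driver-private state ... */
	};

	static void my_vm_free(struct drm_gpuvm *gpuvm)
	{
		/* Called from drm_gpuvm_put() when the last reference is gone. */
		kfree(container_of(gpuvm, struct my_vm, base));
	}

	static const struct drm_gpuvm_ops my_vm_ops = {
		.vm_free = my_vm_free,	/* mandatory with this patch */
	};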
int
drm_gpuva_insert(struct drm_gpuvm *gpuvm,
		 struct drm_gpuva *va)
{
u64 addr = va->va.addr;
u64 range = va->va.range;
+ int ret;
if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
return -EINVAL;
- return __drm_gpuva_insert(gpuvm, va);
+ ret = __drm_gpuva_insert(gpuvm, va);
+ if (likely(!ret))
+ /* Take a reference on the GPUVM for the successfully inserted
+ * drm_gpuva. We can't take the reference in
+ * __drm_gpuva_insert() itself, since we don't want to increase
+ * the reference count for the GPUVM's kernel_alloc_node.
+ */
+ drm_gpuvm_get(gpuvm);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(drm_gpuva_insert);
}
__drm_gpuva_remove(va);
+ drm_gpuvm_put(va->vm);
}
EXPORT_SYMBOL_GPL(drm_gpuva_remove);
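Taken together, the two hunks above mean every successfully inserted drm_gpuva pins its VM, so a VM with live mappings cannot be freed even after the driver drops its own reference. A sketch with hypothetical variables (error handling omitted):

	ret = drm_gpuva_insert(gpuvm, va);	/* success takes a gpuvm reference */
	drm_gpuvm_put(gpuvm);			/* VM stays alive, pinned by va */
	drm_gpuva_remove(va);			/* drops va's reference; if it is
						 * the last one, vm_free() runs */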
}
}
+static void
+nouveau_uvmm_free(struct drm_gpuvm *gpuvm)
+{
+ struct nouveau_uvmm *uvmm = uvmm_from_gpuvm(gpuvm);
+
+ kfree(uvmm);
+}
+
+static const struct drm_gpuvm_ops gpuvm_ops = {
+ .vm_free = nouveau_uvmm_free,
+};
+
int
nouveau_uvmm_ioctl_vm_init(struct drm_device *dev,
void *data,
NOUVEAU_VA_SPACE_END,
init->kernel_managed_addr,
init->kernel_managed_size,
- NULL);
+ &gpuvm_ops);
/* GPUVM takes care from here on. */
drm_gem_object_put(r_obj);
return 0;
out_gpuvm_fini:
- drm_gpuvm_destroy(&uvmm->base);
- kfree(uvmm);
+ drm_gpuvm_put(&uvmm->base);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm);
- drm_gpuvm_destroy(&uvmm->base);
- kfree(uvmm);
+ drm_gpuvm_put(&uvmm->base);
mutex_unlock(&cli->mutex);
}
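Note the design choice in the conversion above: the final kfree() of the uvmm moves into nouveau_uvmm_free(), invoked from the last drm_gpuvm_put(), so the error and teardown paths merely drop their reference instead of destroying and freeing the object directly.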
struct list_head list;
} rb;
+ /**
+ * @kref: reference count of this object
+ */
+ struct kref kref;
+
/**
* @kernel_alloc_node:
*
u64 start_offset, u64 range,
u64 reserve_offset, u64 reserve_range,
const struct drm_gpuvm_ops *ops);
-void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm);
+
+/**
+ * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
+ * @gpuvm: the &drm_gpuvm to acquire the reference of
+ *
+ * This function acquires an additional reference to @gpuvm. It is illegal to
+ * call this without already holding a reference. No locks required.
+ */
+static inline struct drm_gpuvm *
+drm_gpuvm_get(struct drm_gpuvm *gpuvm)
+{
+ kref_get(&gpuvm->kref);
+
+ return gpuvm;
+}
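A hypothetical usage sketch for the inline above (my_ctx is not part of this patch); returning @gpuvm allows taking and storing the reference in a single expression:

	struct my_ctx {
		struct drm_gpuvm *vm;
	};

	static void my_ctx_init(struct my_ctx *ctx, struct drm_gpuvm *vm)
	{
		/* The caller must already hold a reference on @vm. */
		ctx->vm = drm_gpuvm_get(vm);
	}

	static void my_ctx_fini(struct my_ctx *ctx)
	{
		drm_gpuvm_put(ctx->vm);	/* balances the get above */
	}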
+
+void drm_gpuvm_put(struct drm_gpuvm *gpuvm);
bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
* operations to drivers.
*/
struct drm_gpuvm_ops {
+ /**
+ * @vm_free: called when the last reference of a struct drm_gpuvm is
+ * dropped
+ *
+ * This callback is mandatory.
+ */
+ void (*vm_free)(struct drm_gpuvm *gpuvm);
+
/**
* @op_alloc: called when the &drm_gpuvm allocates
* a struct drm_gpuva_op