drm/amdgpu/userq: use consistent function naming
authorAlex Deucher <alexander.deucher@amd.com>
Wed, 16 Apr 2025 21:49:45 +0000 (17:49 -0400)
committerAlex Deucher <alexander.deucher@amd.com>
Tue, 22 Apr 2025 12:51:46 +0000 (08:51 -0400)
s/userqueue/userq/

1. Removes the mix of amdgpu_userqueue and amdgpu_userq naming
   (a representative before/after is sketched below).
2. Matches the naming already used in amdgpu_userq_fence.c.
3. It's shorter.
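
For illustration, a representative (not exhaustive) sketch of the rename,
using prototypes declared in the new amdgpu_userq.h:

    /* before */
    u32 amdgpu_userqueue_get_supported_ip_mask(struct amdgpu_device *adev);
    void amdgpu_userqueue_evict(struct amdgpu_userq_mgr *uq_mgr,
                                struct amdgpu_eviction_fence *ev_fence);

    /* after */
    u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
    void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
                            struct amdgpu_eviction_fence *ev_fence);

Names that already used the short prefix (struct amdgpu_usermode_queue,
struct amdgpu_userq_mgr, the amdgpu_userq_fence_* helpers) are untouched;
the amdgpu_userqueue_* functions and the amdgpu_userqueue.[ch] files move
to the amdgpu_userq prefix.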

Reviewed-by: Prike Liang <Prike.Liang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
13 files changed:
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c [deleted file]
drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h [deleted file]
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
drivers/gpu/drm/amd/amdgpu/mes_userqueue.h

index 513c4d64f5542254ebf93d64277cfd8a838152a6..8595e05c691b16139106c857fa6878a6910ef342 100644 (file)
@@ -257,7 +257,7 @@ amdgpu-y += \
 amdgpu-y += amdgpu_amdkfd.o
 
 # add gfx usermode queue
-amdgpu-y += amdgpu_userqueue.o
+amdgpu-y += amdgpu_userq.o
 
 ifneq ($(CONFIG_HSA_AMD),)
 AMDKFD_PATH := ../amdkfd
index decf66c2a71870d82b082d4ceb6915abc0e5e2e9..cc26cf1bd843ee6445a35153a7767e4c162b4da9 100644 (file)
 #include "amdgpu_xcp.h"
 #include "amdgpu_seq64.h"
 #include "amdgpu_reg_state.h"
-#include "amdgpu_userqueue.h"
+#include "amdgpu_userq.h"
 #include "amdgpu_eviction_fence.h"
 #if defined(CONFIG_DRM_AMD_ISP)
 #include "amdgpu_isp.h"
index e24b0c730baf52cc20aa0103d552ad79fdc5ead5..b9a1ef343c79cf451c7217d9e303daed1f26cff9 100644 (file)
@@ -51,7 +51,7 @@
 #include "amdgpu_reset.h"
 #include "amdgpu_sched.h"
 #include "amdgpu_xgmi.h"
-#include "amdgpu_userqueue.h"
+#include "amdgpu_userq.h"
 #include "amdgpu_userq_fence.h"
 #include "../amdxcp/amdgpu_xcp_drv.h"
 
index 02164bca51a7dc9786a2fd1898255324cdb56454..faa3f59b20c5ca876ce8efbbc78ab3eae305abce 100644 (file)
@@ -112,7 +112,7 @@ amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
        if (!ev_fence)
                goto unlock;
 
-       amdgpu_userqueue_evict(uq_mgr, ev_fence);
+       amdgpu_userq_evict(uq_mgr, ev_fence);
 
 unlock:
        mutex_unlock(&uq_mgr->userq_mutex);
index 151366ecc0af6127c6f9be68ecfeadf5ff62286d..8f992314c5a1e3ce261bf3ec32d7f8664aeba683 100644 (file)
@@ -45,7 +45,7 @@
 #include "amdgpu_ras.h"
 #include "amdgpu_reset.h"
 #include "amd_pcie.h"
-#include "amdgpu_userqueue.h"
+#include "amdgpu_userq.h"
 
 void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
 {
@@ -1009,7 +1009,7 @@ out:
                        }
                }
 
-               dev_info->userq_ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
+               dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
 
                ret = copy_to_user(out, dev_info,
                                   min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
new file mode 100644 (file)
index 0000000..4be72be
--- /dev/null
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drm_auth.h>
+#include <drm/drm_exec.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
+{
+       int i;
+       u32 userq_ip_mask = 0;
+
+       for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+               if (adev->userq_funcs[i])
+                       userq_ip_mask |= (1 << i);
+       }
+
+       return userq_ip_mask;
+}
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+                         struct amdgpu_usermode_queue *queue)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       const struct amdgpu_userq_funcs *userq_funcs =
+               adev->userq_funcs[queue->queue_type];
+       int r = 0;
+
+       if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+               r = userq_funcs->unmap(uq_mgr, queue);
+               if (r)
+                       queue->state = AMDGPU_USERQ_STATE_HUNG;
+               else
+                       queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+       }
+       return r;
+}
+
+static int
+amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
+                       struct amdgpu_usermode_queue *queue)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       const struct amdgpu_userq_funcs *userq_funcs =
+               adev->userq_funcs[queue->queue_type];
+       int r = 0;
+
+       if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+               r = userq_funcs->map(uq_mgr, queue);
+               if (r) {
+                       queue->state = AMDGPU_USERQ_STATE_HUNG;
+               } else {
+                       queue->state = AMDGPU_USERQ_STATE_MAPPED;
+               }
+       }
+       return r;
+}
+
+static void
+amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+                                struct amdgpu_usermode_queue *queue)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       struct dma_fence *f = queue->last_fence;
+       int ret;
+
+       if (f && !dma_fence_is_signaled(f)) {
+               ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+               if (ret <= 0)
+                       dev_err(adev->dev, "Timed out waiting for fence f=%p\n", f);
+       }
+}
+
+static void
+amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+                    struct amdgpu_usermode_queue *queue,
+                    int queue_id)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+
+       uq_funcs->mqd_destroy(uq_mgr, queue);
+       amdgpu_userq_fence_driver_free(queue);
+       idr_remove(&uq_mgr->userq_idr, queue_id);
+       kfree(queue);
+}
+
+int
+amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
+{
+       struct amdgpu_usermode_queue *queue;
+       int queue_id;
+       int ret = 0;
+
+       mutex_lock(&uq_mgr->userq_mutex);
+       /* Count how many queues are currently mapped for this process */
+       idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
+               ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
+
+       mutex_unlock(&uq_mgr->userq_mutex);
+       return ret;
+}
+
+#ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
+static struct amdgpu_usermode_queue *
+amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
+{
+       return idr_find(&uq_mgr->userq_idr, qid);
+}
+
+void
+amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
+                            struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+       struct amdgpu_eviction_fence *ev_fence;
+
+retry:
+       /* Flush any pending resume work to create ev_fence */
+       flush_delayed_work(&uq_mgr->resume_work);
+
+       mutex_lock(&uq_mgr->userq_mutex);
+       spin_lock(&evf_mgr->ev_fence_lock);
+       ev_fence = evf_mgr->ev_fence;
+       spin_unlock(&evf_mgr->ev_fence_lock);
+       if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+               mutex_unlock(&uq_mgr->userq_mutex);
+               /*
+                * Looks like there was no pending resume work,
+                * add one now to create a valid eviction fence
+                */
+               schedule_delayed_work(&uq_mgr->resume_work, 0);
+               goto retry;
+       }
+}
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+                              struct amdgpu_userq_obj *userq_obj,
+                              int size)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       struct amdgpu_bo_param bp;
+       int r;
+
+       memset(&bp, 0, sizeof(bp));
+       bp.byte_align = PAGE_SIZE;
+       bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+       bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+                  AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+       bp.type = ttm_bo_type_kernel;
+       bp.size = size;
+       bp.resv = NULL;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+       r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
+       if (r) {
+               DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+               return r;
+       }
+
+       r = amdgpu_bo_reserve(userq_obj->obj, true);
+       if (r) {
+               DRM_ERROR("Failed to reserve BO to map (%d)", r);
+               goto free_obj;
+       }
+
+       r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
+       if (r) {
+               DRM_ERROR("Failed to alloc GART for userqueue object (%d)", r);
+               goto unresv;
+       }
+
+       r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
+       if (r) {
+               DRM_ERROR("Failed to map BO for userqueue (%d)", r);
+               goto unresv;
+       }
+
+       userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
+       amdgpu_bo_unreserve(userq_obj->obj);
+       memset(userq_obj->cpu_ptr, 0, size);
+       return 0;
+
+unresv:
+       amdgpu_bo_unreserve(userq_obj->obj);
+
+free_obj:
+       amdgpu_bo_unref(&userq_obj->obj);
+       return r;
+}
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+                                struct amdgpu_userq_obj *userq_obj)
+{
+       amdgpu_bo_kunmap(userq_obj->obj);
+       amdgpu_bo_unref(&userq_obj->obj);
+}
+
+uint64_t
+amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+                               struct amdgpu_db_info *db_info,
+                               struct drm_file *filp)
+{
+       uint64_t index;
+       struct drm_gem_object *gobj;
+       struct amdgpu_userq_obj *db_obj = db_info->db_obj;
+       int r, db_size;
+
+       gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
+       if (gobj == NULL) {
+               DRM_ERROR("Can't find GEM object for doorbell\n");
+               return -EINVAL;
+       }
+
+       db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+       drm_gem_object_put(gobj);
+
+       /* Pin the BO before generating the index, unpin in queue destroy */
+       r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
+       if (r) {
+               DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
+               goto unref_bo;
+       }
+
+       r = amdgpu_bo_reserve(db_obj->obj, true);
+       if (r) {
+               DRM_ERROR("[Usermode queues] Failed to reserve doorbell object\n");
+               goto unpin_bo;
+       }
+
+       switch (db_info->queue_type) {
+       case AMDGPU_HW_IP_GFX:
+       case AMDGPU_HW_IP_COMPUTE:
+       case AMDGPU_HW_IP_DMA:
+               db_size = sizeof(u64);
+               break;
+
+       case AMDGPU_HW_IP_VCN_ENC:
+               db_size = sizeof(u32);
+               db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
+               break;
+
+       case AMDGPU_HW_IP_VPE:
+               db_size = sizeof(u32);
+               db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
+               break;
+
+       default:
+               DRM_ERROR("[Usermode queues] IP %d not supported\n", db_info->queue_type);
+               r = -EINVAL;
+               goto unpin_bo;
+       }
+
+       index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
+                                            db_info->doorbell_offset, db_size);
+       DRM_DEBUG_DRIVER("[Usermode queues] doorbell index=%lld\n", index);
+       amdgpu_bo_unreserve(db_obj->obj);
+       return index;
+
+unpin_bo:
+       amdgpu_bo_unpin(db_obj->obj);
+
+unref_bo:
+       amdgpu_bo_unref(&db_obj->obj);
+       return r;
+}
+
+static int
+amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
+{
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+       struct amdgpu_device *adev = uq_mgr->adev;
+       struct amdgpu_usermode_queue *queue;
+       int r = 0;
+
+       cancel_delayed_work(&uq_mgr->resume_work);
+       mutex_lock(&uq_mgr->userq_mutex);
+
+       queue = amdgpu_userq_find(uq_mgr, queue_id);
+       if (!queue) {
+               DRM_DEBUG_DRIVER("Invalid queue id to destroy\n");
+               mutex_unlock(&uq_mgr->userq_mutex);
+               return -EINVAL;
+       }
+       amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
+       r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+       amdgpu_bo_unpin(queue->db_obj.obj);
+       amdgpu_bo_unref(&queue->db_obj.obj);
+       amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
+       mutex_unlock(&uq_mgr->userq_mutex);
+
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+       return r;
+}
+
+static int amdgpu_userq_priority_permit(struct drm_file *filp,
+                                       int priority)
+{
+       if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
+               return 0;
+
+       if (capable(CAP_SYS_NICE))
+               return 0;
+
+       if (drm_is_current_master(filp))
+               return 0;
+
+       return -EACCES;
+}
+
+static int
+amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
+{
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+       struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+       struct amdgpu_device *adev = uq_mgr->adev;
+       const struct amdgpu_userq_funcs *uq_funcs;
+       struct amdgpu_usermode_queue *queue;
+       struct amdgpu_db_info db_info;
+       bool skip_map_queue;
+       uint64_t index;
+       int qid, r = 0;
+       int priority =
+               (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
+               AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
+
+       /* Usermode queues are currently only supported for GFX, COMPUTE and SDMA IPs */
+       if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+           args->in.ip_type != AMDGPU_HW_IP_DMA &&
+           args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+               DRM_ERROR("Usermode queue doesn't support IP type %u\n", args->in.ip_type);
+               return -EINVAL;
+       }
+
+       r = amdgpu_userq_priority_permit(filp, priority);
+       if (r)
+               return r;
+
+       if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+           (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+           (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+           !amdgpu_is_tmz(adev)) {
+               drm_err(adev_to_drm(adev), "Secure only supported on GFX/Compute queues\n");
+               return -EINVAL;
+       }
+
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+       if (r < 0) {
+               dev_err(adev->dev, "pm_runtime_get_sync() failed for userqueue create\n");
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               return r;
+       }
+
+       /*
+        * There could be a situation that we are creating a new queue while
+        * the other queues under this UQ_mgr are suspended. So if there is any
+        * resume work pending, wait for it to get done.
+        *
+        * This will also make sure we have a valid eviction fence ready to be used.
+        */
+       amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+       uq_funcs = adev->userq_funcs[args->in.ip_type];
+       if (!uq_funcs) {
+               DRM_ERROR("Usermode queue is not supported for this IP (%u)\n", args->in.ip_type);
+               r = -EINVAL;
+               goto unlock;
+       }
+
+       queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
+       if (!queue) {
+               DRM_ERROR("Failed to allocate memory for queue\n");
+               r = -ENOMEM;
+               goto unlock;
+       }
+       queue->doorbell_handle = args->in.doorbell_handle;
+       queue->queue_type = args->in.ip_type;
+       queue->vm = &fpriv->vm;
+       queue->priority = priority;
+
+       db_info.queue_type = queue->queue_type;
+       db_info.doorbell_handle = queue->doorbell_handle;
+       db_info.db_obj = &queue->db_obj;
+       db_info.doorbell_offset = args->in.doorbell_offset;
+
+       /* Convert relative doorbell offset into absolute doorbell index */
+       index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
+       if (index == (uint64_t)-EINVAL) {
+               DRM_ERROR("Failed to get doorbell for queue\n");
+               kfree(queue);
+               goto unlock;
+       }
+
+       queue->doorbell_index = index;
+       xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
+       r = amdgpu_userq_fence_driver_alloc(adev, queue);
+       if (r) {
+               DRM_ERROR("Failed to alloc fence driver\n");
+               goto unlock;
+       }
+
+       r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
+       if (r) {
+               DRM_ERROR("Failed to create Queue\n");
+               amdgpu_userq_fence_driver_free(queue);
+               kfree(queue);
+               goto unlock;
+       }
+
+
+       qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
+       if (qid < 0) {
+               DRM_ERROR("Failed to allocate a queue id\n");
+               amdgpu_userq_fence_driver_free(queue);
+               uq_funcs->mqd_destroy(uq_mgr, queue);
+               kfree(queue);
+               r = -ENOMEM;
+               goto unlock;
+       }
+
+       /* don't map the queue if scheduling is halted */
+       mutex_lock(&adev->userq_mutex);
+       if (adev->userq_halt_for_enforce_isolation &&
+           ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+            (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
+               skip_map_queue = true;
+       else
+               skip_map_queue = false;
+       if (!skip_map_queue) {
+               r = amdgpu_userq_map_helper(uq_mgr, queue);
+               if (r) {
+                       mutex_unlock(&adev->userq_mutex);
+                       DRM_ERROR("Failed to map Queue\n");
+                       idr_remove(&uq_mgr->userq_idr, qid);
+                       amdgpu_userq_fence_driver_free(queue);
+                       uq_funcs->mqd_destroy(uq_mgr, queue);
+                       kfree(queue);
+                       goto unlock;
+               }
+       }
+       mutex_unlock(&adev->userq_mutex);
+
+
+       args->out.queue_id = qid;
+
+unlock:
+       mutex_unlock(&uq_mgr->userq_mutex);
+
+       return r;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *filp)
+{
+       union drm_amdgpu_userq *args = data;
+       int r;
+
+       switch (args->in.op) {
+       case AMDGPU_USERQ_OP_CREATE:
+               if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
+                                      AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
+                       return -EINVAL;
+               r = amdgpu_userq_create(filp, args);
+               if (r)
+                       DRM_ERROR("Failed to create usermode queue\n");
+               break;
+
+       case AMDGPU_USERQ_OP_FREE:
+               if (args->in.ip_type ||
+                   args->in.doorbell_handle ||
+                   args->in.doorbell_offset ||
+                   args->in.flags ||
+                   args->in.queue_va ||
+                   args->in.queue_size ||
+                   args->in.rptr_va ||
+                   args->in.wptr_va ||
+                   args->in.wptr_va ||
+                   args->in.mqd ||
+                   args->in.mqd_size)
+                       return -EINVAL;
+               r = amdgpu_userq_destroy(filp, args->in.queue_id);
+               if (r)
+                       DRM_ERROR("Failed to destroy usermode queue\n");
+               break;
+
+       default:
+               DRM_DEBUG_DRIVER("Invalid user queue op specified: %d\n", args->in.op);
+               return -EINVAL;
+       }
+
+       return r;
+}
+#else
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+                      struct drm_file *filp)
+{
+       return -ENOTSUPP;
+}
+#endif
+
+static int
+amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       struct amdgpu_usermode_queue *queue;
+       int queue_id;
+       int ret = 0, r;
+
+       /* Resume all the queues for this process */
+       idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+               r = amdgpu_userq_map_helper(uq_mgr, queue);
+               if (r)
+                       ret = r;
+       }
+
+       if (ret)
+               dev_err(adev->dev, "Failed to map all the queues\n");
+       return ret;
+}
+
+static int
+amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+{
+       struct ttm_operation_ctx ctx = { false, false };
+       int ret;
+
+       amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+
+       ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (ret)
+               DRM_ERROR("Failed to validate\n");
+
+       return ret;
+}
+
+static int
+amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
+{
+       struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+       struct amdgpu_vm *vm = &fpriv->vm;
+       struct amdgpu_device *adev = uq_mgr->adev;
+       struct amdgpu_bo_va *bo_va;
+       struct ww_acquire_ctx *ticket;
+       struct drm_exec exec;
+       struct amdgpu_bo *bo;
+       struct dma_resv *resv;
+       bool clear, unlock;
+       int ret = 0;
+
+       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+       drm_exec_until_all_locked(&exec) {
+               ret = amdgpu_vm_lock_pd(vm, &exec, 2);
+               drm_exec_retry_on_contention(&exec);
+               if (unlikely(ret)) {
+                       DRM_ERROR("Failed to lock PD\n");
+                       goto unlock_all;
+               }
+
+               /* Lock the done list */
+               list_for_each_entry(bo_va, &vm->done, base.vm_status) {
+                       bo = bo_va->base.bo;
+                       if (!bo)
+                               continue;
+
+                       ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
+                       drm_exec_retry_on_contention(&exec);
+                       if (unlikely(ret))
+                               goto unlock_all;
+               }
+       }
+
+       spin_lock(&vm->status_lock);
+       while (!list_empty(&vm->moved)) {
+               bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+                                        base.vm_status);
+               spin_unlock(&vm->status_lock);
+
+               /* Per VM BOs never need to be cleared in the page tables */
+               ret = amdgpu_vm_bo_update(adev, bo_va, false);
+               if (ret)
+                       goto unlock_all;
+               spin_lock(&vm->status_lock);
+       }
+
+       ticket = &exec.ticket;
+       while (!list_empty(&vm->invalidated)) {
+               bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
+                                        base.vm_status);
+               resv = bo_va->base.bo->tbo.base.resv;
+               spin_unlock(&vm->status_lock);
+
+               bo = bo_va->base.bo;
+               ret = amdgpu_userq_validate_vm_bo(NULL, bo);
+               if (ret) {
+                       DRM_ERROR("Failed to validate BO\n");
+                       goto unlock_all;
+               }
+
+               /* Try to reserve the BO to avoid clearing its ptes */
+               if (!adev->debug_vm && dma_resv_trylock(resv)) {
+                       clear = false;
+                       unlock = true;
+               /* The caller is already holding the reservation lock */
+               } else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
+                       clear = false;
+                       unlock = false;
+               /* Somebody else is using the BO right now */
+               } else {
+                       clear = true;
+                       unlock = false;
+               }
+
+               ret = amdgpu_vm_bo_update(adev, bo_va, clear);
+
+               if (unlock)
+                       dma_resv_unlock(resv);
+               if (ret)
+                       goto unlock_all;
+
+               spin_lock(&vm->status_lock);
+       }
+       spin_unlock(&vm->status_lock);
+
+       ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+       if (ret)
+               DRM_ERROR("Failed to replace eviction fence\n");
+
+unlock_all:
+       drm_exec_fini(&exec);
+       return ret;
+}
+
+static void amdgpu_userq_restore_worker(struct work_struct *work)
+{
+       struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
+       struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+       int ret;
+
+       flush_work(&fpriv->evf_mgr.suspend_work.work);
+
+       mutex_lock(&uq_mgr->userq_mutex);
+
+       ret = amdgpu_userq_validate_bos(uq_mgr);
+       if (ret) {
+               DRM_ERROR("Failed to validate BOs to restore\n");
+               goto unlock;
+       }
+
+       ret = amdgpu_userq_restore_all(uq_mgr);
+       if (ret) {
+               DRM_ERROR("Failed to restore all queues\n");
+               goto unlock;
+       }
+
+unlock:
+       mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static int
+amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       struct amdgpu_usermode_queue *queue;
+       int queue_id;
+       int ret = 0, r;
+
+       /* Try to unmap all the queues in this process ctx */
+       idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+               r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+               if (r)
+                       ret = r;
+       }
+
+       if (ret)
+               dev_err(adev->dev, "Couldn't unmap all the queues\n");
+       return ret;
+}
+
+static int
+amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
+{
+       struct amdgpu_usermode_queue *queue;
+       int queue_id, ret;
+
+       idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+               struct dma_fence *f = queue->last_fence;
+
+               if (!f || dma_fence_is_signaled(f))
+                       continue;
+               ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+               if (ret <= 0) {
+                       DRM_ERROR("Timed out waiting for fence=%llu:%llu\n",
+                                 f->context, f->seqno);
+                       return -ETIMEDOUT;
+               }
+       }
+
+       return 0;
+}
+
+void
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+                  struct amdgpu_eviction_fence *ev_fence)
+{
+       int ret;
+       struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+       struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+
+       /* Wait for any pending userqueue fence work to finish */
+       ret = amdgpu_userq_wait_for_signal(uq_mgr);
+       if (ret) {
+               DRM_ERROR("Not evicting userqueue, timeout waiting for work\n");
+               return;
+       }
+
+       ret = amdgpu_userq_evict_all(uq_mgr);
+       if (ret) {
+               DRM_ERROR("Failed to evict userqueue\n");
+               return;
+       }
+
+       /* Signal current eviction fence */
+       amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
+
+       if (evf_mgr->fd_closing) {
+               cancel_delayed_work(&uq_mgr->resume_work);
+               return;
+       }
+
+       /* Schedule a resume work */
+       schedule_delayed_work(&uq_mgr->resume_work, 0);
+}
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_device *adev)
+{
+       mutex_init(&userq_mgr->userq_mutex);
+       idr_init_base(&userq_mgr->userq_idr, 1);
+       userq_mgr->adev = adev;
+
+       mutex_lock(&adev->userq_mutex);
+       list_add(&userq_mgr->list, &adev->userq_mgr_list);
+       mutex_unlock(&adev->userq_mutex);
+
+       INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
+       return 0;
+}
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
+{
+       struct amdgpu_device *adev = userq_mgr->adev;
+       struct amdgpu_usermode_queue *queue;
+       struct amdgpu_userq_mgr *uqm, *tmp;
+       uint32_t queue_id;
+
+       cancel_delayed_work(&userq_mgr->resume_work);
+
+       mutex_lock(&userq_mgr->userq_mutex);
+       idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+               amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
+               amdgpu_userq_unmap_helper(userq_mgr, queue);
+               amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
+       }
+       mutex_lock(&adev->userq_mutex);
+       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+               if (uqm == userq_mgr) {
+                       list_del(&uqm->list);
+                       break;
+               }
+       }
+       mutex_unlock(&adev->userq_mutex);
+       idr_destroy(&userq_mgr->userq_idr);
+       mutex_unlock(&userq_mgr->userq_mutex);
+       mutex_destroy(&userq_mgr->userq_mutex);
+}
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev)
+{
+       u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+       struct amdgpu_usermode_queue *queue;
+       struct amdgpu_userq_mgr *uqm, *tmp;
+       int queue_id;
+       int ret = 0, r;
+
+       if (!ip_mask)
+               return 0;
+
+       mutex_lock(&adev->userq_mutex);
+       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+               cancel_delayed_work_sync(&uqm->resume_work);
+               idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+                       r = amdgpu_userq_unmap_helper(uqm, queue);
+                       if (r)
+                               ret = r;
+               }
+       }
+       mutex_unlock(&adev->userq_mutex);
+       return ret;
+}
+
+int amdgpu_userq_resume(struct amdgpu_device *adev)
+{
+       u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+       struct amdgpu_usermode_queue *queue;
+       struct amdgpu_userq_mgr *uqm, *tmp;
+       int queue_id;
+       int ret = 0, r;
+
+       if (!ip_mask)
+               return 0;
+
+       mutex_lock(&adev->userq_mutex);
+       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+               idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+                       r = amdgpu_userq_map_helper(uqm, queue);
+                       if (r)
+                               ret = r;
+               }
+       }
+       mutex_unlock(&adev->userq_mutex);
+       return ret;
+}
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+                                                 u32 idx)
+{
+       u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+       struct amdgpu_usermode_queue *queue;
+       struct amdgpu_userq_mgr *uqm, *tmp;
+       int queue_id;
+       int ret = 0, r;
+
+       /* only need to stop gfx/compute */
+       if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+               return 0;
+
+       mutex_lock(&adev->userq_mutex);
+       if (adev->userq_halt_for_enforce_isolation)
+               dev_warn(adev->dev, "userq scheduling already stopped!\n");
+       adev->userq_halt_for_enforce_isolation = true;
+       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+               cancel_delayed_work_sync(&uqm->resume_work);
+               idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+                       if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+                            (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+                           (queue->xcp_id == idx)) {
+                               r = amdgpu_userq_unmap_helper(uqm, queue);
+                               if (r)
+                                       ret = r;
+                       }
+               }
+       }
+       mutex_unlock(&adev->userq_mutex);
+       return ret;
+}
+
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+                                                  u32 idx)
+{
+       u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+       struct amdgpu_usermode_queue *queue;
+       struct amdgpu_userq_mgr *uqm, *tmp;
+       int queue_id;
+       int ret = 0, r;
+
+       /* only need to stop gfx/compute */
+       if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+               return 0;
+
+       mutex_lock(&adev->userq_mutex);
+       if (!adev->userq_halt_for_enforce_isolation)
+               dev_warn(adev->dev, "userq scheduling already started!\n");
+       adev->userq_halt_for_enforce_isolation = false;
+       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+               idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+                       if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+                            (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+                           (queue->xcp_id == idx)) {
+                               r = amdgpu_userq_map_helper(uqm, queue);
+                               if (r)
+                                       ret = r;
+                       }
+               }
+       }
+       mutex_unlock(&adev->userq_mutex);
+       return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
new file mode 100644 (file)
index 0000000..4d3eb65
--- /dev/null
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_USERQ_H_
+#define AMDGPU_USERQ_H_
+#include "amdgpu_eviction_fence.h"
+
+#define AMDGPU_MAX_USERQ_COUNT 512
+
+#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
+#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
+#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+
+enum amdgpu_userq_state {
+       AMDGPU_USERQ_STATE_UNMAPPED = 0,
+       AMDGPU_USERQ_STATE_MAPPED,
+       AMDGPU_USERQ_STATE_PREEMPTED,
+       AMDGPU_USERQ_STATE_HUNG,
+};
+
+struct amdgpu_mqd_prop;
+
+struct amdgpu_userq_obj {
+       void             *cpu_ptr;
+       uint64_t         gpu_addr;
+       struct amdgpu_bo *obj;
+};
+
+struct amdgpu_usermode_queue {
+       int                     queue_type;
+       enum amdgpu_userq_state state;
+       uint64_t                doorbell_handle;
+       uint64_t                doorbell_index;
+       uint64_t                flags;
+       struct amdgpu_mqd_prop  *userq_prop;
+       struct amdgpu_userq_mgr *userq_mgr;
+       struct amdgpu_vm        *vm;
+       struct amdgpu_userq_obj mqd;
+       struct amdgpu_userq_obj db_obj;
+       struct amdgpu_userq_obj fw_obj;
+       struct amdgpu_userq_obj wptr_obj;
+       struct xarray           fence_drv_xa;
+       struct amdgpu_userq_fence_driver *fence_drv;
+       struct dma_fence        *last_fence;
+       u32                     xcp_id;
+       int                     priority;
+};
+
+struct amdgpu_userq_funcs {
+       int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
+                         struct drm_amdgpu_userq_in *args,
+                         struct amdgpu_usermode_queue *queue);
+       void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
+                           struct amdgpu_usermode_queue *uq);
+       int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
+                    struct amdgpu_usermode_queue *queue);
+       int (*map)(struct amdgpu_userq_mgr *uq_mgr,
+                  struct amdgpu_usermode_queue *queue);
+};
+
+/* Usermode queues for gfx */
+struct amdgpu_userq_mgr {
+       struct idr                      userq_idr;
+       struct mutex                    userq_mutex;
+       struct amdgpu_device            *adev;
+       struct delayed_work             resume_work;
+       struct list_head                list;
+};
+
+struct amdgpu_db_info {
+       uint64_t doorbell_handle;
+       uint32_t queue_type;
+       uint32_t doorbell_offset;
+       struct amdgpu_userq_obj *db_obj;
+};
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_device *adev);
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+                              struct amdgpu_userq_obj *userq_obj,
+                              int size);
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+                                struct amdgpu_userq_obj *userq_obj);
+
+void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+                       struct amdgpu_eviction_fence *ev_fence);
+
+int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
+
+void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
+                                 struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+                                         struct amdgpu_db_info *db_info,
+                                         struct drm_file *filp);
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev);
+int amdgpu_userq_resume(struct amdgpu_device *adev);
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+                                                 u32 idx);
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+                                                  u32 idx);
+
+#endif
index ca198360cfdad9142ae6b1c2701449673cbcd6e5..be068e8e37d17613d3fa3c4751b1e6ac62d64dbc 100644 (file)
@@ -292,7 +292,7 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
 
 static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
 {
-       return "amdgpu_userqueue_fence";
+       return "amdgpu_userq_fence";
 }
 
 static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
@@ -513,7 +513,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
                goto put_gobj_write;
 
        /* We are here means UQ is active, make sure the eviction fence is valid */
-       amdgpu_userqueue_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+       amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
 
        /* Create a new fence */
        r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
index 2af4e0c157732be3504da98a67ec2771d87094f3..97a125ab8a78696958c45fa8f566e1872f88746f 100644 (file)
@@ -27,7 +27,7 @@
 
 #include <linux/types.h>
 
-#include "amdgpu_userqueue.h"
+#include "amdgpu_userq.h"
 
 struct amdgpu_userq_fence {
        struct dma_fence base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
deleted file mode 100644 (file)
index 82741dc..0000000
+++ /dev/null
@@ -1,915 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright 2023 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drm_auth.h>
-#include <drm/drm_exec.h>
-#include <linux/pm_runtime.h>
-
-#include "amdgpu.h"
-#include "amdgpu_vm.h"
-#include "amdgpu_userqueue.h"
-#include "amdgpu_userq_fence.h"
-
-u32 amdgpu_userqueue_get_supported_ip_mask(struct amdgpu_device *adev)
-{
-       int i;
-       u32 userq_ip_mask = 0;
-
-       for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
-               if (adev->userq_funcs[i])
-                       userq_ip_mask |= (1 << i);
-       }
-
-       return userq_ip_mask;
-}
-
-static int
-amdgpu_userqueue_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
-                             struct amdgpu_usermode_queue *queue)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       const struct amdgpu_userq_funcs *userq_funcs =
-               adev->userq_funcs[queue->queue_type];
-       int r = 0;
-
-       if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
-               r = userq_funcs->unmap(uq_mgr, queue);
-               if (r)
-                       queue->state = AMDGPU_USERQ_STATE_HUNG;
-               else
-                       queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
-       }
-       return r;
-}
-
-static int
-amdgpu_userqueue_map_helper(struct amdgpu_userq_mgr *uq_mgr,
-                           struct amdgpu_usermode_queue *queue)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       const struct amdgpu_userq_funcs *userq_funcs =
-               adev->userq_funcs[queue->queue_type];
-       int r = 0;
-
-       if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
-               r = userq_funcs->map(uq_mgr, queue);
-               if (r) {
-                       queue->state = AMDGPU_USERQ_STATE_HUNG;
-               } else {
-                       queue->state = AMDGPU_USERQ_STATE_MAPPED;
-               }
-       }
-       return r;
-}
-
-static void
-amdgpu_userqueue_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
-                                    struct amdgpu_usermode_queue *queue)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       struct dma_fence *f = queue->last_fence;
-       int ret;
-
-       if (f && !dma_fence_is_signaled(f)) {
-               ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
-               if (ret <= 0)
-                       dev_err(adev->dev, "Timed out waiting for fence f=%p\n", f);
-       }
-}
-
-static void
-amdgpu_userqueue_cleanup(struct amdgpu_userq_mgr *uq_mgr,
-                        struct amdgpu_usermode_queue *queue,
-                        int queue_id)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
-
-       uq_funcs->mqd_destroy(uq_mgr, queue);
-       amdgpu_userq_fence_driver_free(queue);
-       idr_remove(&uq_mgr->userq_idr, queue_id);
-       kfree(queue);
-}
-
-int
-amdgpu_userqueue_active(struct amdgpu_userq_mgr *uq_mgr)
-{
-       struct amdgpu_usermode_queue *queue;
-       int queue_id;
-       int ret = 0;
-
-       mutex_lock(&uq_mgr->userq_mutex);
-       /* Resume all the queues for this process */
-       idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
-               ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
-
-       mutex_unlock(&uq_mgr->userq_mutex);
-       return ret;
-}
-
-#ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
-static struct amdgpu_usermode_queue *
-amdgpu_userqueue_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
-{
-       return idr_find(&uq_mgr->userq_idr, qid);
-}
-
-void
-amdgpu_userqueue_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
-                                struct amdgpu_eviction_fence_mgr *evf_mgr)
-{
-       struct amdgpu_eviction_fence *ev_fence;
-
-retry:
-       /* Flush any pending resume work to create ev_fence */
-       flush_delayed_work(&uq_mgr->resume_work);
-
-       mutex_lock(&uq_mgr->userq_mutex);
-       spin_lock(&evf_mgr->ev_fence_lock);
-       ev_fence = evf_mgr->ev_fence;
-       spin_unlock(&evf_mgr->ev_fence_lock);
-       if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
-               mutex_unlock(&uq_mgr->userq_mutex);
-               /*
-                * Looks like there was no pending resume work,
-                * add one now to create a valid eviction fence
-                */
-               schedule_delayed_work(&uq_mgr->resume_work, 0);
-               goto retry;
-       }
-}
-
-int amdgpu_userqueue_create_object(struct amdgpu_userq_mgr *uq_mgr,
-                                  struct amdgpu_userq_obj *userq_obj,
-                                  int size)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       struct amdgpu_bo_param bp;
-       int r;
-
-       memset(&bp, 0, sizeof(bp));
-       bp.byte_align = PAGE_SIZE;
-       bp.domain = AMDGPU_GEM_DOMAIN_GTT;
-       bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
-                  AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-       bp.type = ttm_bo_type_kernel;
-       bp.size = size;
-       bp.resv = NULL;
-       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
-       r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
-       if (r) {
-               DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
-               return r;
-       }
-
-       r = amdgpu_bo_reserve(userq_obj->obj, true);
-       if (r) {
-               DRM_ERROR("Failed to reserve BO to map (%d)", r);
-               goto free_obj;
-       }
-
-       r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
-       if (r) {
-               DRM_ERROR("Failed to alloc GART for userqueue object (%d)", r);
-               goto unresv;
-       }
-
-       r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
-       if (r) {
-               DRM_ERROR("Failed to map BO for userqueue (%d)", r);
-               goto unresv;
-       }
-
-       userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
-       amdgpu_bo_unreserve(userq_obj->obj);
-       memset(userq_obj->cpu_ptr, 0, size);
-       return 0;
-
-unresv:
-       amdgpu_bo_unreserve(userq_obj->obj);
-
-free_obj:
-       amdgpu_bo_unref(&userq_obj->obj);
-       return r;
-}
-
-void amdgpu_userqueue_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
-                                  struct amdgpu_userq_obj *userq_obj)
-{
-       amdgpu_bo_kunmap(userq_obj->obj);
-       amdgpu_bo_unref(&userq_obj->obj);
-}
-
-uint64_t
-amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
-                                    struct amdgpu_db_info *db_info,
-                                    struct drm_file *filp)
-{
-       uint64_t index;
-       struct drm_gem_object *gobj;
-       struct amdgpu_userq_obj *db_obj = db_info->db_obj;
-       int r, db_size;
-
-       gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
-       if (gobj == NULL) {
-               DRM_ERROR("Can't find GEM object for doorbell\n");
-               return -EINVAL;
-       }
-
-       db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
-       drm_gem_object_put(gobj);
-
-       /* Pin the BO before generating the index, unpin in queue destroy */
-       r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
-       if (r) {
-               DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
-               goto unref_bo;
-       }
-
-       r = amdgpu_bo_reserve(db_obj->obj, true);
-       if (r) {
-               DRM_ERROR("[Usermode queues] Failed to pin doorbell object\n");
-               goto unpin_bo;
-       }
-
-       switch (db_info->queue_type) {
-       case AMDGPU_HW_IP_GFX:
-       case AMDGPU_HW_IP_COMPUTE:
-       case AMDGPU_HW_IP_DMA:
-               db_size = sizeof(u64);
-               break;
-
-       case AMDGPU_HW_IP_VCN_ENC:
-               db_size = sizeof(u32);
-               db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
-               break;
-
-       case AMDGPU_HW_IP_VPE:
-               db_size = sizeof(u32);
-               db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
-               break;
-
-       default:
-               DRM_ERROR("[Usermode queues] IP %d not support\n", db_info->queue_type);
-               r = -EINVAL;
-               goto unpin_bo;
-       }
-
-       index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
-                                            db_info->doorbell_offset, db_size);
-       DRM_DEBUG_DRIVER("[Usermode queues] doorbell index=%lld\n", index);
-       amdgpu_bo_unreserve(db_obj->obj);
-       return index;
-
-unpin_bo:
-       amdgpu_bo_unpin(db_obj->obj);
-
-unref_bo:
-       amdgpu_bo_unref(&db_obj->obj);
-       return r;
-}
-
-static int
-amdgpu_userqueue_destroy(struct drm_file *filp, int queue_id)
-{
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
-       struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
-       struct amdgpu_device *adev = uq_mgr->adev;
-       struct amdgpu_usermode_queue *queue;
-       int r = 0;
-
-       cancel_delayed_work(&uq_mgr->resume_work);
-       mutex_lock(&uq_mgr->userq_mutex);
-
-       queue = amdgpu_userqueue_find(uq_mgr, queue_id);
-       if (!queue) {
-               DRM_DEBUG_DRIVER("Invalid queue id to destroy\n");
-               mutex_unlock(&uq_mgr->userq_mutex);
-               return -EINVAL;
-       }
-       amdgpu_userqueue_wait_for_last_fence(uq_mgr, queue);
-       r = amdgpu_userqueue_unmap_helper(uq_mgr, queue);
-       amdgpu_bo_unpin(queue->db_obj.obj);
-       amdgpu_bo_unref(&queue->db_obj.obj);
-       amdgpu_userqueue_cleanup(uq_mgr, queue, queue_id);
-       mutex_unlock(&uq_mgr->userq_mutex);
-
-       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-
-       return r;
-}
-
-static int amdgpu_userq_priority_permit(struct drm_file *filp,
-                                       int priority)
-{
-       if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
-               return 0;
-
-       if (capable(CAP_SYS_NICE))
-               return 0;
-
-       if (drm_is_current_master(filp))
-               return 0;
-
-       return -EACCES;
-}
-
-static int
-amdgpu_userqueue_create(struct drm_file *filp, union drm_amdgpu_userq *args)
-{
-       struct amdgpu_fpriv *fpriv = filp->driver_priv;
-       struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
-       struct amdgpu_device *adev = uq_mgr->adev;
-       const struct amdgpu_userq_funcs *uq_funcs;
-       struct amdgpu_usermode_queue *queue;
-       struct amdgpu_db_info db_info;
-       bool skip_map_queue;
-       uint64_t index;
-       int qid, r = 0;
-       int priority =
-               (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
-               AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
-
-       /* Usermode queues are only supported for GFX IP as of now */
-       if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
-           args->in.ip_type != AMDGPU_HW_IP_DMA &&
-           args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
-               DRM_ERROR("Usermode queue doesn't support IP type %u\n", args->in.ip_type);
-               return -EINVAL;
-       }
-
-       r = amdgpu_userq_priority_permit(filp, priority);
-       if (r)
-               return r;
-
-       if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
-           (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
-           (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
-           !amdgpu_is_tmz(adev)) {
-               drm_err(adev_to_drm(adev), "Secure only supported on GFX/Compute queues\n");
-               return -EINVAL;
-       }
-
-       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
-       if (r < 0) {
-               dev_err(adev->dev, "pm_runtime_get_sync() failed for userqueue create\n");
-               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-               return r;
-       }
-
-       /*
-        * There could be a situation that we are creating a new queue while
-        * the other queues under this UQ_mgr are suspended. So if there is any
-        * resume work pending, wait for it to get done.
-        *
-        * This will also make sure we have a valid eviction fence ready to be used.
-        */
-       amdgpu_userqueue_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
-
-       uq_funcs = adev->userq_funcs[args->in.ip_type];
-       if (!uq_funcs) {
-               DRM_ERROR("Usermode queue is not supported for this IP (%u)\n", args->in.ip_type);
-               r = -EINVAL;
-               goto unlock;
-       }
-
-       queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
-       if (!queue) {
-               DRM_ERROR("Failed to allocate memory for queue\n");
-               r = -ENOMEM;
-               goto unlock;
-       }
-       queue->doorbell_handle = args->in.doorbell_handle;
-       queue->queue_type = args->in.ip_type;
-       queue->vm = &fpriv->vm;
-       queue->priority = priority;
-
-       db_info.queue_type = queue->queue_type;
-       db_info.doorbell_handle = queue->doorbell_handle;
-       db_info.db_obj = &queue->db_obj;
-       db_info.doorbell_offset = args->in.doorbell_offset;
-
-       /* Convert relative doorbell offset into absolute doorbell index */
-       index = amdgpu_userqueue_get_doorbell_index(uq_mgr, &db_info, filp);
-       if (index == (uint64_t)-EINVAL) {
-               DRM_ERROR("Failed to get doorbell for queue\n");
-               kfree(queue);
-               goto unlock;
-       }
-
-       queue->doorbell_index = index;
-       xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
-       r = amdgpu_userq_fence_driver_alloc(adev, queue);
-       if (r) {
-               DRM_ERROR("Failed to alloc fence driver\n");
-               goto unlock;
-       }
-
-       r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
-       if (r) {
-               DRM_ERROR("Failed to create Queue\n");
-               amdgpu_userq_fence_driver_free(queue);
-               kfree(queue);
-               goto unlock;
-       }
-
-       qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
-       if (qid < 0) {
-               DRM_ERROR("Failed to allocate a queue id\n");
-               amdgpu_userq_fence_driver_free(queue);
-               uq_funcs->mqd_destroy(uq_mgr, queue);
-               kfree(queue);
-               r = -ENOMEM;
-               goto unlock;
-       }
-
-       /* don't map the queue if scheduling is halted */
-       mutex_lock(&adev->userq_mutex);
-       skip_map_queue = adev->userq_halt_for_enforce_isolation &&
-                        ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
-                         (queue->queue_type == AMDGPU_HW_IP_COMPUTE));
-       if (!skip_map_queue) {
-               r = amdgpu_userqueue_map_helper(uq_mgr, queue);
-               if (r) {
-                       mutex_unlock(&adev->userq_mutex);
-                       DRM_ERROR("Failed to map Queue\n");
-                       idr_remove(&uq_mgr->userq_idr, qid);
-                       amdgpu_userq_fence_driver_free(queue);
-                       uq_funcs->mqd_destroy(uq_mgr, queue);
-                       kfree(queue);
-                       goto unlock;
-               }
-       }
-       mutex_unlock(&adev->userq_mutex);
-
-       args->out.queue_id = qid;
-
-unlock:
-       mutex_unlock(&uq_mgr->userq_mutex);
-
-       return r;
-}
-
-int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
-                      struct drm_file *filp)
-{
-       union drm_amdgpu_userq *args = data;
-       int r;
-
-       switch (args->in.op) {
-       case AMDGPU_USERQ_OP_CREATE:
-               if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
-                                      AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
-                       return -EINVAL;
-               r = amdgpu_userqueue_create(filp, args);
-               if (r)
-                       DRM_ERROR("Failed to create usermode queue\n");
-               break;
-
-       case AMDGPU_USERQ_OP_FREE:
-               if (args->in.ip_type ||
-                   args->in.doorbell_handle ||
-                   args->in.doorbell_offset ||
-                   args->in.flags ||
-                   args->in.queue_va ||
-                   args->in.queue_size ||
-                   args->in.rptr_va ||
-                   args->in.wptr_va ||
-                   args->in.mqd ||
-                   args->in.mqd_size)
-                       return -EINVAL;
-               r = amdgpu_userqueue_destroy(filp, args->in.queue_id);
-               if (r)
-                       DRM_ERROR("Failed to destroy usermode queue\n");
-               break;
-
-       default:
-               DRM_DEBUG_DRIVER("Invalid user queue op specified: %d\n", args->in.op);
-               return -EINVAL;
-       }
-
-       return r;
-}
-#else
-int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
-                      struct drm_file *filp)
-{
-       return -ENOTSUPP;
-}
-#endif
-
-static int
-amdgpu_userqueue_restore_all(struct amdgpu_userq_mgr *uq_mgr)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       struct amdgpu_usermode_queue *queue;
-       int queue_id;
-       int ret = 0, r;
-
-       /* Resume all the queues for this process */
-       idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
-               r = amdgpu_userqueue_map_helper(uq_mgr, queue);
-               if (r)
-                       ret = r;
-       }
-
-       if (ret)
-               dev_err(adev->dev, "Failed to map all the queues\n");
-       return ret;
-}
-
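-/* Move a BO back into one of its allowed domains and validate the new placement */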
-static int
-amdgpu_userqueue_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
-{
-       struct ttm_operation_ctx ctx = { false, false };
-       int ret;
-
-       amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
-
-       ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-       if (ret)
-               DRM_ERROR("Failed to validate\n");
-
-       return ret;
-}
-
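-/*
- * Lock the VM PD and the BOs on the done list, update the page table
- * mappings of any moved or invalidated BOs, then attach a fresh eviction
- * fence so the queues can be safely restored.
- */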
-static int
-amdgpu_userqueue_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
-{
-       struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
-       struct amdgpu_vm *vm = &fpriv->vm;
-       struct amdgpu_device *adev = uq_mgr->adev;
-       struct amdgpu_bo_va *bo_va;
-       struct ww_acquire_ctx *ticket;
-       struct drm_exec exec;
-       struct amdgpu_bo *bo;
-       struct dma_resv *resv;
-       bool clear, unlock;
-       int ret = 0;
-
-       drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
-       drm_exec_until_all_locked(&exec) {
-               ret = amdgpu_vm_lock_pd(vm, &exec, 2);
-               drm_exec_retry_on_contention(&exec);
-               if (unlikely(ret)) {
-                       DRM_ERROR("Failed to lock PD\n");
-                       goto unlock_all;
-               }
-
-               /* Lock the done list */
-               list_for_each_entry(bo_va, &vm->done, base.vm_status) {
-                       bo = bo_va->base.bo;
-                       if (!bo)
-                               continue;
-
-                       ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
-                       drm_exec_retry_on_contention(&exec);
-                       if (unlikely(ret))
-                               goto unlock_all;
-               }
-       }
-
-       spin_lock(&vm->status_lock);
-       while (!list_empty(&vm->moved)) {
-               bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
-                                        base.vm_status);
-               spin_unlock(&vm->status_lock);
-
-               /* Per VM BOs never need to be cleared in the page tables */
-               ret = amdgpu_vm_bo_update(adev, bo_va, false);
-               if (ret)
-                       goto unlock_all;
-               spin_lock(&vm->status_lock);
-       }
-
-       ticket = &exec.ticket;
-       while (!list_empty(&vm->invalidated)) {
-               bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
-                                        base.vm_status);
-               resv = bo_va->base.bo->tbo.base.resv;
-               spin_unlock(&vm->status_lock);
-
-               bo = bo_va->base.bo;
-               ret = amdgpu_userqueue_validate_vm_bo(NULL, bo);
-               if (ret) {
-                       DRM_ERROR("Failed to validate BO\n");
-                       goto unlock_all;
-               }
-
-               /* Try to reserve the BO to avoid clearing its ptes */
-               if (!adev->debug_vm && dma_resv_trylock(resv)) {
-                       clear = false;
-                       unlock = true;
-               /* The caller is already holding the reservation lock */
-               } else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
-                       clear = false;
-                       unlock = false;
-               /* Somebody else is using the BO right now */
-               } else {
-                       clear = true;
-                       unlock = false;
-               }
-
-               ret = amdgpu_vm_bo_update(adev, bo_va, clear);
-
-               if (unlock)
-                       dma_resv_unlock(resv);
-               if (ret)
-                       goto unlock_all;
-
-               spin_lock(&vm->status_lock);
-       }
-       spin_unlock(&vm->status_lock);
-
-       ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
-       if (ret)
-               DRM_ERROR("Failed to replace eviction fence\n");
-
-unlock_all:
-       drm_exec_fini(&exec);
-       return ret;
-}
-
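-/* Deferred work that revalidates the process BOs and remaps all of its queues */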
-static void amdgpu_userqueue_restore_worker(struct work_struct *work)
-{
-       struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
-       struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
-       int ret;
-
-       flush_work(&fpriv->evf_mgr.suspend_work.work);
-
-       mutex_lock(&uq_mgr->userq_mutex);
-
-       ret = amdgpu_userqueue_validate_bos(uq_mgr);
-       if (ret) {
-               DRM_ERROR("Failed to validate BOs to restore\n");
-               goto unlock;
-       }
-
-       ret = amdgpu_userqueue_restore_all(uq_mgr);
-       if (ret) {
-               DRM_ERROR("Failed to restore all queues\n");
-               goto unlock;
-       }
-
-unlock:
-       mutex_unlock(&uq_mgr->userq_mutex);
-}
-
-static int
-amdgpu_userqueue_evict_all(struct amdgpu_userq_mgr *uq_mgr)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       struct amdgpu_usermode_queue *queue;
-       int queue_id;
-       int ret = 0, r;
-
-       /* Try to unmap all the queues in this process ctx */
-       idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
-               r = amdgpu_userqueue_unmap_helper(uq_mgr, queue);
-               if (r)
-                       ret = r;
-       }
-
-       if (ret)
-               dev_err(adev->dev, "Couldn't unmap all the queues\n");
-       return ret;
-}
-
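-/* Wait briefly for the last fence of every queue in this process to signal */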
-static int
-amdgpu_userqueue_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
-{
-       struct amdgpu_usermode_queue *queue;
-       int queue_id, ret;
-
-       idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
-               struct dma_fence *f = queue->last_fence;
-
-               if (!f || dma_fence_is_signaled(f))
-                       continue;
-               ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
-               if (ret <= 0) {
-                       DRM_ERROR("Timed out waiting for fence=%llu:%llu\n",
-                                 f->context, f->seqno);
-                       return -ETIMEDOUT;
-               }
-       }
-
-       return 0;
-}
-
-void
-amdgpu_userqueue_evict(struct amdgpu_userq_mgr *uq_mgr,
-                      struct amdgpu_eviction_fence *ev_fence)
-{
-       int ret;
-       struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
-       struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
-
-       /* Wait for any pending userqueue fence work to finish */
-       ret = amdgpu_userqueue_wait_for_signal(uq_mgr);
-       if (ret) {
-               DRM_ERROR("Not evicting userqueue, timed out waiting for work\n");
-               return;
-       }
-
-       ret = amdgpu_userqueue_evict_all(uq_mgr);
-       if (ret) {
-               DRM_ERROR("Failed to evict userqueue\n");
-               return;
-       }
-
-       /* Signal current eviction fence */
-       amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
-
-       if (evf_mgr->fd_closing) {
-               cancel_delayed_work(&uq_mgr->resume_work);
-               return;
-       }
-
-       /* Schedule a resume work */
-       schedule_delayed_work(&uq_mgr->resume_work, 0);
-}
-
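-/* Initialize the per-process user queue manager and register it with the device */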
-int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_device *adev)
-{
-       mutex_init(&userq_mgr->userq_mutex);
-       idr_init_base(&userq_mgr->userq_idr, 1);
-       userq_mgr->adev = adev;
-
-       mutex_lock(&adev->userq_mutex);
-       list_add(&userq_mgr->list, &adev->userq_mgr_list);
-       mutex_unlock(&adev->userq_mutex);
-
-       INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userqueue_restore_worker);
-       return 0;
-}
-
-void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
-{
-       struct amdgpu_device *adev = userq_mgr->adev;
-       struct amdgpu_usermode_queue *queue;
-       struct amdgpu_userq_mgr *uqm, *tmp;
-       uint32_t queue_id;
-
-       cancel_delayed_work(&userq_mgr->resume_work);
-
-       mutex_lock(&userq_mgr->userq_mutex);
-       idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
-               amdgpu_userqueue_wait_for_last_fence(userq_mgr, queue);
-               amdgpu_userqueue_unmap_helper(userq_mgr, queue);
-               amdgpu_userqueue_cleanup(userq_mgr, queue, queue_id);
-       }
-       mutex_lock(&adev->userq_mutex);
-       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
-               if (uqm == userq_mgr) {
-                       list_del(&uqm->list);
-                       break;
-               }
-       }
-       mutex_unlock(&adev->userq_mutex);
-       idr_destroy(&userq_mgr->userq_idr);
-       mutex_unlock(&userq_mgr->userq_mutex);
-       mutex_destroy(&userq_mgr->userq_mutex);
-}
-
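-/* Unmap every user queue on the device (device-wide suspend of user queues) */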
-int amdgpu_userq_suspend(struct amdgpu_device *adev)
-{
-       u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
-       struct amdgpu_usermode_queue *queue;
-       struct amdgpu_userq_mgr *uqm, *tmp;
-       int queue_id;
-       int ret = 0, r;
-
-       if (!ip_mask)
-               return 0;
-
-       mutex_lock(&adev->userq_mutex);
-       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
-               cancel_delayed_work_sync(&uqm->resume_work);
-               idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
-                       r = amdgpu_userqueue_unmap_helper(uqm, queue);
-                       if (r)
-                               ret = r;
-               }
-       }
-       mutex_unlock(&adev->userq_mutex);
-       return ret;
-}
-
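-/* Remap every user queue on the device (device-wide resume of user queues) */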
-int amdgpu_userq_resume(struct amdgpu_device *adev)
-{
-       u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
-       struct amdgpu_usermode_queue *queue;
-       struct amdgpu_userq_mgr *uqm, *tmp;
-       int queue_id;
-       int ret = 0, r;
-
-       if (!ip_mask)
-               return 0;
-
-       mutex_lock(&adev->userq_mutex);
-       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
-               idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
-                       r = amdgpu_userqueue_map_helper(uqm, queue);
-                       if (r)
-                               ret = r;
-               }
-       }
-       mutex_unlock(&adev->userq_mutex);
-       return ret;
-}
-
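-/*
- * Halt user queue scheduling for enforce isolation: unmap all GFX/compute
- * queues on the given partition and block new mappings until restarted.
- */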
-int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
-                                                 u32 idx)
-{
-       u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
-       struct amdgpu_usermode_queue *queue;
-       struct amdgpu_userq_mgr *uqm, *tmp;
-       int queue_id;
-       int ret = 0, r;
-
-       /* only need to stop gfx/compute */
-       if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
-               return 0;
-
-       mutex_lock(&adev->userq_mutex);
-       if (adev->userq_halt_for_enforce_isolation)
-               dev_warn(adev->dev, "userq scheduling already stopped!\n");
-       adev->userq_halt_for_enforce_isolation = true;
-       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
-               cancel_delayed_work_sync(&uqm->resume_work);
-               idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
-                       if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
-                            (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
-                           (queue->xcp_id == idx)) {
-                               r = amdgpu_userqueue_unmap_helper(uqm, queue);
-                               if (r)
-                                       ret = r;
-                       }
-               }
-       }
-       mutex_unlock(&adev->userq_mutex);
-       return ret;
-}
-
-int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
-                                                  u32 idx)
-{
-       u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
-       struct amdgpu_usermode_queue *queue;
-       struct amdgpu_userq_mgr *uqm, *tmp;
-       int queue_id;
-       int ret = 0, r;
-
-       /* only need to stop gfx/compute */
-       if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
-               return 0;
-
-       mutex_lock(&adev->userq_mutex);
-       if (!adev->userq_halt_for_enforce_isolation)
-               dev_warn(adev->dev, "userq scheduling already started!\n");
-       adev->userq_halt_for_enforce_isolation = false;
-       list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
-               idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
-                       if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
-                            (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
-                           (queue->xcp_id == idx)) {
-                               r = amdgpu_userqueue_map_helper(uqm, queue);
-                               if (r)
-                                       ret = r;
-                       }
-               }
-       }
-       mutex_unlock(&adev->userq_mutex);
-       return ret;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.h
deleted file mode 100644 (file)
index a9f0e46..0000000
+++ /dev/null
@@ -1,133 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2023 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef AMDGPU_USERQUEUE_H_
-#define AMDGPU_USERQUEUE_H_
-#include "amdgpu_eviction_fence.h"
-
-#define AMDGPU_MAX_USERQ_COUNT 512
-
-#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
-#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
-#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
-
-enum amdgpu_userqueue_state {
-       AMDGPU_USERQ_STATE_UNMAPPED = 0,
-       AMDGPU_USERQ_STATE_MAPPED,
-       AMDGPU_USERQ_STATE_PREEMPTED,
-       AMDGPU_USERQ_STATE_HUNG,
-};
-
-struct amdgpu_mqd_prop;
-
-struct amdgpu_userq_obj {
-       void             *cpu_ptr;
-       uint64_t         gpu_addr;
-       struct amdgpu_bo *obj;
-};
-
-struct amdgpu_usermode_queue {
-       int                     queue_type;
-       enum amdgpu_userqueue_state state;
-       uint64_t                doorbell_handle;
-       uint64_t                doorbell_index;
-       uint64_t                flags;
-       struct amdgpu_mqd_prop  *userq_prop;
-       struct amdgpu_userq_mgr *userq_mgr;
-       struct amdgpu_vm        *vm;
-       struct amdgpu_userq_obj mqd;
-       struct amdgpu_userq_obj db_obj;
-       struct amdgpu_userq_obj fw_obj;
-       struct amdgpu_userq_obj wptr_obj;
-       struct xarray           fence_drv_xa;
-       struct amdgpu_userq_fence_driver *fence_drv;
-       struct dma_fence        *last_fence;
-       u32                     xcp_id;
-       int                     priority;
-};
-
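-/* IP-specific backend callbacks for creating, destroying, mapping and unmapping user queues */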
-struct amdgpu_userq_funcs {
-       int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
-                         struct drm_amdgpu_userq_in *args,
-                         struct amdgpu_usermode_queue *queue);
-       void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
-                           struct amdgpu_usermode_queue *uq);
-       int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
-                    struct amdgpu_usermode_queue *queue);
-       int (*map)(struct amdgpu_userq_mgr *uq_mgr,
-                  struct amdgpu_usermode_queue *queue);
-};
-
-/* Usermode queues for gfx */
-struct amdgpu_userq_mgr {
-       struct idr                      userq_idr;
-       struct mutex                    userq_mutex;
-       struct amdgpu_device            *adev;
-       struct delayed_work             resume_work;
-       struct list_head                list;
-};
-
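-/* Parameters used to translate a doorbell handle/offset into an absolute doorbell index */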
-struct amdgpu_db_info {
-       uint64_t doorbell_handle;
-       uint32_t queue_type;
-       uint32_t doorbell_offset;
-       struct amdgpu_userq_obj *db_obj;
-};
-
-int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
-
-int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct amdgpu_device *adev);
-
-void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
-
-int amdgpu_userqueue_create_object(struct amdgpu_userq_mgr *uq_mgr,
-                                  struct amdgpu_userq_obj *userq_obj,
-                                  int size);
-
-void amdgpu_userqueue_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
-                                    struct amdgpu_userq_obj *userq_obj);
-
-void amdgpu_userqueue_evict(struct amdgpu_userq_mgr *uq_mgr,
-                           struct amdgpu_eviction_fence *ev_fence);
-
-int amdgpu_userqueue_active(struct amdgpu_userq_mgr *uq_mgr);
-
-void amdgpu_userqueue_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
-                                     struct amdgpu_eviction_fence_mgr *evf_mgr);
-
-uint64_t amdgpu_userqueue_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
-                                            struct amdgpu_db_info *db_info,
-                                            struct drm_file *filp);
-
-u32 amdgpu_userqueue_get_supported_ip_mask(struct amdgpu_device *adev);
-
-int amdgpu_userq_suspend(struct amdgpu_device *adev);
-int amdgpu_userq_resume(struct amdgpu_device *adev);
-
-int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
-                                                 u32 idx);
-int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
-                                                  u32 idx);
-
-#endif
index 4c01c3a0309565390a950e137eff1a21d79d200c..d6f50b13e2ba0f02d4cfab668589d7c7c99cd193 100644 (file)
@@ -189,7 +189,7 @@ static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
         * for the same.
         */
        size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
-       r = amdgpu_userqueue_create_object(uq_mgr, ctx, size);
+       r = amdgpu_userq_create_object(uq_mgr, ctx, size);
        if (r) {
                DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
                return r;
@@ -222,7 +222,7 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
                goto free_props;
        }
 
-       r = amdgpu_userqueue_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
+       r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
        if (r) {
                DRM_ERROR("Failed to create MQD object for userqueue\n");
                goto free_props;
@@ -327,10 +327,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
        return 0;
 
 free_ctx:
-       amdgpu_userqueue_destroy_object(uq_mgr, &queue->fw_obj);
+       amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
 
 free_mqd:
-       amdgpu_userqueue_destroy_object(uq_mgr, &queue->mqd);
+       amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
 
 free_props:
        kfree(userq_props);
@@ -342,9 +342,9 @@ static void
 mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
                      struct amdgpu_usermode_queue *queue)
 {
-       amdgpu_userqueue_destroy_object(uq_mgr, &queue->fw_obj);
+       amdgpu_userq_destroy_object(uq_mgr, &queue->fw_obj);
        kfree(queue->userq_prop);
-       amdgpu_userqueue_destroy_object(uq_mgr, &queue->mqd);
+       amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
 }
 
 const struct amdgpu_userq_funcs userq_mes_funcs = {
index d0a521312ad4bf1092c002bc6d5d25c1c3983cfa..090ae889777059afd08fb1b0753c1c41eb8d3434 100644 (file)
@@ -24,7 +24,7 @@
 
 #ifndef MES_USERQ_H
 #define MES_USERQ_H
-#include "amdgpu_userqueue.h"
+#include "amdgpu_userq.h"
 
 extern const struct amdgpu_userq_funcs userq_mes_funcs;
 #endif