drm/amdgpu/uq: make MES UQ setup generic
authorAlex Deucher <alexander.deucher@amd.com>
Tue, 26 Nov 2024 14:45:19 +0000 (15:45 +0100)
committerAlex Deucher <alexander.deucher@amd.com>
Tue, 8 Apr 2025 20:48:19 +0000 (16:48 -0400)
Now that all of the IP specific code has been moved into
the IP specific functions, we can make this code generic.

V2: Fixed build errors and porting logic (Shashank)

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Shashank Sharma <shashank.sharma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/mes_userqueue.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/mes_v11_0_userqueue.c [deleted file]
drivers/gpu/drm/amd/amdgpu/mes_v11_0_userqueue.h [deleted file]
drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c

index 0b0c8ec46516c2b46d460c0ee614f85bef639cbf..513c4d64f5542254ebf93d64277cfd8a838152a6 100644 (file)
@@ -177,7 +177,7 @@ amdgpu-y += \
        mes_v12_0.o \
 
 # add GFX userqueue support
-amdgpu-$(CONFIG_DRM_AMDGPU_NAVI3X_USERQ) += mes_v11_0_userqueue.o
+amdgpu-$(CONFIG_DRM_AMDGPU_NAVI3X_USERQ) += mes_userqueue.o
 
 # add UVD block
 amdgpu-y += \
index b8f75e1ba72ca2af4416f22fd4fca621275f3f21..63b7c7bfcc4a0ba0ead3332d512dd8dde2ecaa03 100644 (file)
@@ -48,7 +48,7 @@
 #include "gfx_v11_0_3.h"
 #include "nbio_v4_3.h"
 #include "mes_v11_0.h"
-#include "mes_v11_0_userqueue.h"
+#include "mes_userqueue.h"
 #include "amdgpu_userq_fence.h"
 
 #define GFX11_NUM_GFX_RINGS            1
@@ -1623,8 +1623,8 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
                adev->gfx.mec.num_pipe_per_mec = 4;
                adev->gfx.mec.num_queue_per_pipe = 4;
 #ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
-               adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_v11_0_funcs;
-               adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_v11_0_funcs;
+               adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+               adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
 #endif
                break;
        case IP_VERSION(11, 0, 1):
@@ -1640,8 +1640,8 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
                adev->gfx.mec.num_pipe_per_mec = 4;
                adev->gfx.mec.num_queue_per_pipe = 4;
 #ifdef CONFIG_DRM_AMD_USERQ_GFX
-               adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_v11_0_funcs;
-               adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_v11_0_funcs;
+               adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs;
+               adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs;
 #endif
                break;
        default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
new file mode 100644 (file)
index 0000000..9c2fc8a
--- /dev/null
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "mes_userqueue.h"
+#include "amdgpu_userq_fence.h"
+#include "v11_structs.h"
+
+#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
+#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
+
+static int
+mes_userq_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
+{
+       int ret;
+
+       ret = amdgpu_bo_reserve(bo, true);
+       if (ret) {
+               DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
+               goto err_reserve_bo_failed;
+       }
+
+       ret = amdgpu_ttm_alloc_gart(&bo->tbo);
+       if (ret) {
+               DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
+               goto err_map_bo_gart_failed;
+       }
+
+       amdgpu_bo_unreserve(bo);
+       bo = amdgpu_bo_ref(bo);
+
+       return 0;
+
+err_map_bo_gart_failed:
+       amdgpu_bo_unreserve(bo);
+err_reserve_bo_failed:
+       return ret;
+}
+
+static int
+mes_userq_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
+                             struct amdgpu_usermode_queue *queue,
+                             uint64_t wptr)
+{
+       struct amdgpu_bo_va_mapping *wptr_mapping;
+       struct amdgpu_vm *wptr_vm;
+       struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
+       int ret;
+
+       wptr_vm = queue->vm;
+       ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
+       if (ret)
+               return ret;
+
+       wptr &= AMDGPU_GMC_HOLE_MASK;
+       wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
+       amdgpu_bo_unreserve(wptr_vm->root.bo);
+       if (!wptr_mapping) {
+               DRM_ERROR("Failed to lookup wptr bo\n");
+               return -EINVAL;
+       }
+
+       wptr_obj->obj = wptr_mapping->bo_va->base.bo;
+       if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
+               DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
+               return -EINVAL;
+       }
+
+       ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj);
+       if (ret) {
+               DRM_ERROR("Failed to map wptr bo to GART\n");
+               return ret;
+       }
+
+       queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
+       return 0;
+}
+
+static int mes_userq_map(struct amdgpu_userq_mgr *uq_mgr,
+                        struct amdgpu_usermode_queue *queue,
+                        struct amdgpu_mqd_prop *userq_props)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+       struct mes_add_queue_input queue_input;
+       int r;
+
+       memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
+
+       queue_input.process_va_start = 0;
+       queue_input.process_va_end = (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
+
+       /* set process quantum to 10 ms and gang quantum to 1 ms as default */
+       queue_input.process_quantum = 100000;
+       queue_input.gang_quantum = 10000;
+       queue_input.paging = false;
+
+       queue_input.process_context_addr = ctx->gpu_addr;
+       queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+       queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+       queue_input.gang_global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
+
+       queue_input.process_id = queue->vm->pasid;
+       queue_input.queue_type = queue->queue_type;
+       queue_input.mqd_addr = queue->mqd.gpu_addr;
+       queue_input.wptr_addr = userq_props->wptr_gpu_addr;
+       queue_input.queue_size = userq_props->queue_size >> 2;
+       queue_input.doorbell_offset = userq_props->doorbell_index;
+       queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
+       queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;
+
+       amdgpu_mes_lock(&adev->mes);
+       r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
+       amdgpu_mes_unlock(&adev->mes);
+       if (r) {
+               DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
+               return r;
+       }
+
+       queue->queue_active = true;
+       DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
+       return 0;
+}
+
+static void mes_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
+                           struct amdgpu_usermode_queue *queue)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       struct mes_remove_queue_input queue_input;
+       struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+       int r;
+
+       memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
+       queue_input.doorbell_offset = queue->doorbell_index;
+       queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+       amdgpu_mes_lock(&adev->mes);
+       r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
+       amdgpu_mes_unlock(&adev->mes);
+       if (r)
+               DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
+       queue->queue_active = false;
+}
+
+static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
+                                     struct amdgpu_usermode_queue *queue,
+                                     struct drm_amdgpu_userq_in *mqd_user)
+{
+       struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+       int r, size;
+
+       /*
+        * The FW expects at least one page space allocated for
+        * process ctx and gang ctx each. Create an object
+        * for the same.
+        */
+       size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
+       r = amdgpu_userqueue_create_object(uq_mgr, ctx, size);
+       if (r) {
+               DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
+               return r;
+       }
+
+       return 0;
+}
+
+static void mes_userq_set_fence_space(struct amdgpu_usermode_queue *queue)
+{
+       struct v11_gfx_mqd *mqd = queue->mqd.cpu_ptr;
+
+       mqd->fenceaddress_lo = lower_32_bits(queue->fence_drv->gpu_addr);
+       mqd->fenceaddress_hi = upper_32_bits(queue->fence_drv->gpu_addr);
+}
+
+static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
+                               struct drm_amdgpu_userq_in *args_in,
+                               struct amdgpu_usermode_queue *queue)
+{
+       struct amdgpu_device *adev = uq_mgr->adev;
+       struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
+       struct drm_amdgpu_userq_in *mqd_user = args_in;
+       struct amdgpu_mqd_prop *userq_props;
+       int r;
+
+       /* Structure to initialize MQD for userqueue using generic MQD init function */
+       userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
+       if (!userq_props) {
+               DRM_ERROR("Failed to allocate memory for userq_props\n");
+               return -ENOMEM;
+       }
+
+       if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
+           !mqd_user->queue_va || mqd_user->queue_size == 0) {
+               DRM_ERROR("Invalid MQD parameters for userqueue\n");
+               r = -EINVAL;
+               goto free_props;
+       }
+
+       r = amdgpu_userqueue_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
+       if (r) {
+               DRM_ERROR("Failed to create MQD object for userqueue\n");
+               goto free_props;
+       }
+
+       /* Initialize the MQD BO with user given values */
+       userq_props->wptr_gpu_addr = mqd_user->wptr_va;
+       userq_props->rptr_gpu_addr = mqd_user->rptr_va;
+       userq_props->queue_size = mqd_user->queue_size;
+       userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
+       userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
+       userq_props->use_doorbell = true;
+       userq_props->doorbell_index = queue->doorbell_index;
+
+       if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
+               struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
+
+               if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
+                       DRM_ERROR("Invalid compute IP MQD size\n");
+                       r = -EINVAL;
+                       goto free_mqd;
+               }
+
+               compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+               if (IS_ERR(compute_mqd)) {
+                       DRM_ERROR("Failed to read user MQD\n");
+                       r = -ENOMEM;
+                       goto free_mqd;
+               }
+
+               userq_props->eop_gpu_addr = compute_mqd->eop_va;
+               userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+               userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
+               userq_props->hqd_active = false;
+               kfree(compute_mqd);
+       } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
+               struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+
+               if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
+                       DRM_ERROR("Invalid GFX MQD\n");
+                       r = -EINVAL;
+                       goto free_mqd;
+               }
+
+               mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+               if (IS_ERR(mqd_gfx_v11)) {
+                       DRM_ERROR("Failed to read user MQD\n");
+                       r = -ENOMEM;
+                       goto free_mqd;
+               }
+
+               userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
+               userq_props->csa_addr = mqd_gfx_v11->csa_va;
+               kfree(mqd_gfx_v11);
+       } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
+               struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
+
+               if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
+                       DRM_ERROR("Invalid SDMA MQD\n");
+                       r = -EINVAL;
+                       goto free_mqd;
+               }
+
+               mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
+               if (IS_ERR(mqd_sdma_v11)) {
+                       DRM_ERROR("Failed to read sdma user MQD\n");
+                       r = -ENOMEM;
+                       goto free_mqd;
+               }
+
+               userq_props->csa_addr = mqd_sdma_v11->csa_va;
+               kfree(mqd_sdma_v11);
+       }
+
+       queue->userq_prop = userq_props;
+
+       r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
+       if (r) {
+               DRM_ERROR("Failed to initialize MQD for userqueue\n");
+               goto free_mqd;
+       }
+
+       /* Create BO for FW operations */
+       r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user);
+       if (r) {
+               DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
+               goto free_mqd;
+       }
+
+       mes_userq_set_fence_space(queue);
+
+       /* FW expects WPTR BOs to be mapped into GART */
+       r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
+       if (r) {
+               DRM_ERROR("Failed to create WPTR mapping\n");
+               goto free_ctx;
+       }
+
+       /* Map userqueue into FW using MES */
+       r = mes_userq_map(uq_mgr, queue, userq_props);
+       if (r) {
+               DRM_ERROR("Failed to init MQD\n");
+               goto free_ctx;
+       }
+
+       return 0;
+
+free_ctx:
+       amdgpu_userqueue_destroy_object(uq_mgr, &queue->fw_obj);
+
+free_mqd:
+       amdgpu_userqueue_destroy_object(uq_mgr, &queue->mqd);
+
+free_props:
+       kfree(userq_props);
+
+       return r;
+}
+
+static void
+mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
+                           struct amdgpu_usermode_queue *queue)
+{
+       if (queue->queue_active)
+               mes_userq_unmap(uq_mgr, queue);
+
+       amdgpu_userqueue_destroy_object(uq_mgr, &queue->fw_obj);
+       kfree(queue->userq_prop);
+       amdgpu_userqueue_destroy_object(uq_mgr, &queue->mqd);
+}
+
+static int mes_userq_suspend(struct amdgpu_userq_mgr *uq_mgr,
+                                  struct amdgpu_usermode_queue *queue)
+{
+       if (queue->queue_active) {
+               mes_userq_unmap(uq_mgr, queue);
+               queue->queue_active = false;
+       }
+
+       return 0;
+}
+
+static int mes_userq_resume(struct amdgpu_userq_mgr *uq_mgr,
+                                 struct amdgpu_usermode_queue *queue)
+{
+       int ret;
+
+       if (queue->queue_active)
+               return 0;
+
+       ret = mes_userq_map(uq_mgr, queue, queue->userq_prop);
+       if (ret) {
+               DRM_ERROR("Failed to resume queue\n");
+               return ret;
+       }
+
+       queue->queue_active = true;
+       return 0;
+}
+
+const struct amdgpu_userq_funcs userq_mes_funcs = {
+       .mqd_create = mes_userq_mqd_create,
+       .mqd_destroy = mes_userq_mqd_destroy,
+       .suspend = mes_userq_suspend,
+       .resume = mes_userq_resume,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.h
new file mode 100644 (file)
index 0000000..d0a5213
--- /dev/null
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef MES_USERQ_H
+#define MES_USERQ_H
+#include "amdgpu_userqueue.h"
+
+extern const struct amdgpu_userq_funcs userq_mes_funcs;
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0_userqueue.c
deleted file mode 100644 (file)
index 1ba6b91..0000000
+++ /dev/null
@@ -1,383 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright 2024 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "amdgpu_gfx.h"
-#include "mes_v11_0_userqueue.h"
-#include "amdgpu_userq_fence.h"
-
-#define AMDGPU_USERQ_PROC_CTX_SZ PAGE_SIZE
-#define AMDGPU_USERQ_GANG_CTX_SZ PAGE_SIZE
-
-static int
-mes_v11_0_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
-{
-       int ret;
-
-       ret = amdgpu_bo_reserve(bo, true);
-       if (ret) {
-               DRM_ERROR("Failed to reserve bo. ret %d\n", ret);
-               goto err_reserve_bo_failed;
-       }
-
-       ret = amdgpu_ttm_alloc_gart(&bo->tbo);
-       if (ret) {
-               DRM_ERROR("Failed to bind bo to GART. ret %d\n", ret);
-               goto err_map_bo_gart_failed;
-       }
-
-       amdgpu_bo_unreserve(bo);
-       bo = amdgpu_bo_ref(bo);
-
-       return 0;
-
-err_map_bo_gart_failed:
-       amdgpu_bo_unreserve(bo);
-err_reserve_bo_failed:
-       return ret;
-}
-
-static int
-mes_v11_0_create_wptr_mapping(struct amdgpu_userq_mgr *uq_mgr,
-                             struct amdgpu_usermode_queue *queue,
-                             uint64_t wptr)
-{
-       struct amdgpu_bo_va_mapping *wptr_mapping;
-       struct amdgpu_vm *wptr_vm;
-       struct amdgpu_userq_obj *wptr_obj = &queue->wptr_obj;
-       int ret;
-
-       wptr_vm = queue->vm;
-       ret = amdgpu_bo_reserve(wptr_vm->root.bo, false);
-       if (ret)
-               return ret;
-
-       wptr &= AMDGPU_GMC_HOLE_MASK;
-       wptr_mapping = amdgpu_vm_bo_lookup_mapping(wptr_vm, wptr >> PAGE_SHIFT);
-       amdgpu_bo_unreserve(wptr_vm->root.bo);
-       if (!wptr_mapping) {
-               DRM_ERROR("Failed to lookup wptr bo\n");
-               return -EINVAL;
-       }
-
-       wptr_obj->obj = wptr_mapping->bo_va->base.bo;
-       if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
-               DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n");
-               return -EINVAL;
-       }
-
-       ret = mes_v11_0_map_gtt_bo_to_gart(wptr_obj->obj);
-       if (ret) {
-               DRM_ERROR("Failed to map wptr bo to GART\n");
-               return ret;
-       }
-
-       queue->wptr_obj.gpu_addr = amdgpu_bo_gpu_offset_no_check(wptr_obj->obj);
-       return 0;
-}
-
-static int mes_v11_0_userq_map(struct amdgpu_userq_mgr *uq_mgr,
-                              struct amdgpu_usermode_queue *queue,
-                              struct amdgpu_mqd_prop *userq_props)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       struct amdgpu_userq_obj *ctx = &queue->fw_obj;
-       struct mes_add_queue_input queue_input;
-       int r;
-
-       memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
-
-       queue_input.process_va_start = 0;
-       queue_input.process_va_end = (adev->vm_manager.max_pfn - 1) << AMDGPU_GPU_PAGE_SHIFT;
-
-       /* set process quantum to 10 ms and gang quantum to 1 ms as default */
-       queue_input.process_quantum = 100000;
-       queue_input.gang_quantum = 10000;
-       queue_input.paging = false;
-
-       queue_input.process_context_addr = ctx->gpu_addr;
-       queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
-       queue_input.inprocess_gang_priority = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
-       queue_input.gang_global_priority_level = AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
-
-       queue_input.process_id = queue->vm->pasid;
-       queue_input.queue_type = queue->queue_type;
-       queue_input.mqd_addr = queue->mqd.gpu_addr;
-       queue_input.wptr_addr = userq_props->wptr_gpu_addr;
-       queue_input.queue_size = userq_props->queue_size >> 2;
-       queue_input.doorbell_offset = userq_props->doorbell_index;
-       queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
-       queue_input.wptr_mc_addr = queue->wptr_obj.gpu_addr;
-
-       amdgpu_mes_lock(&adev->mes);
-       r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
-       amdgpu_mes_unlock(&adev->mes);
-       if (r) {
-               DRM_ERROR("Failed to map queue in HW, err (%d)\n", r);
-               return r;
-       }
-
-       queue->queue_active = true;
-       DRM_DEBUG_DRIVER("Queue (doorbell:%d) mapped successfully\n", userq_props->doorbell_index);
-       return 0;
-}
-
-static void mes_v11_0_userq_unmap(struct amdgpu_userq_mgr *uq_mgr,
-                                 struct amdgpu_usermode_queue *queue)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       struct mes_remove_queue_input queue_input;
-       struct amdgpu_userq_obj *ctx = &queue->fw_obj;
-       int r;
-
-       memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
-       queue_input.doorbell_offset = queue->doorbell_index;
-       queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
-
-       amdgpu_mes_lock(&adev->mes);
-       r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
-       amdgpu_mes_unlock(&adev->mes);
-       if (r)
-               DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r);
-       queue->queue_active = false;
-}
-
-static int mes_v11_0_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
-                                           struct amdgpu_usermode_queue *queue,
-                                           struct drm_amdgpu_userq_in *mqd_user)
-{
-       struct amdgpu_userq_obj *ctx = &queue->fw_obj;
-       int r, size;
-
-       /*
-        * The FW expects at least one page space allocated for
-        * process ctx and gang ctx each. Create an object
-        * for the same.
-        */
-       size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
-       r = amdgpu_userqueue_create_object(uq_mgr, ctx, size);
-       if (r) {
-               DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r);
-               return r;
-       }
-
-       return 0;
-}
-
-static void mes_v11_0_userq_set_fence_space(struct amdgpu_usermode_queue *queue)
-{
-       struct v11_gfx_mqd *mqd = queue->mqd.cpu_ptr;
-
-       mqd->fenceaddress_lo = lower_32_bits(queue->fence_drv->gpu_addr);
-       mqd->fenceaddress_hi = upper_32_bits(queue->fence_drv->gpu_addr);
-}
-
-static int mes_v11_0_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
-                                     struct drm_amdgpu_userq_in *args_in,
-                                     struct amdgpu_usermode_queue *queue)
-{
-       struct amdgpu_device *adev = uq_mgr->adev;
-       struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
-       struct drm_amdgpu_userq_in *mqd_user = args_in;
-       struct amdgpu_mqd_prop *userq_props;
-       int r;
-
-       /* Structure to initialize MQD for userqueue using generic MQD init function */
-       userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL);
-       if (!userq_props) {
-               DRM_ERROR("Failed to allocate memory for userq_props\n");
-               return -ENOMEM;
-       }
-
-       if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
-           !mqd_user->queue_va || mqd_user->queue_size == 0) {
-               DRM_ERROR("Invalid MQD parameters for userqueue\n");
-               r = -EINVAL;
-               goto free_props;
-       }
-
-       r = amdgpu_userqueue_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
-       if (r) {
-               DRM_ERROR("Failed to create MQD object for userqueue\n");
-               goto free_props;
-       }
-
-       /* Initialize the MQD BO with user given values */
-       userq_props->wptr_gpu_addr = mqd_user->wptr_va;
-       userq_props->rptr_gpu_addr = mqd_user->rptr_va;
-       userq_props->queue_size = mqd_user->queue_size;
-       userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
-       userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
-       userq_props->use_doorbell = true;
-       userq_props->doorbell_index = queue->doorbell_index;
-
-       if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
-               struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
-
-               if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
-                       DRM_ERROR("Invalid compute IP MQD size\n");
-                       r = -EINVAL;
-                       goto free_mqd;
-               }
-
-               compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
-               if (IS_ERR(compute_mqd)) {
-                       DRM_ERROR("Failed to read user MQD\n");
-                       r = -ENOMEM;
-                       goto free_mqd;
-               }
-
-               userq_props->eop_gpu_addr = compute_mqd->eop_va;
-               userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
-               userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
-               userq_props->hqd_active = false;
-               kfree(compute_mqd);
-       } else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
-               struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
-
-               if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
-                       DRM_ERROR("Invalid GFX MQD\n");
-                       return -EINVAL;
-               }
-
-               mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
-               if (IS_ERR(mqd_gfx_v11)) {
-                       DRM_ERROR("Failed to read user MQD\n");
-                       amdgpu_userqueue_destroy_object(uq_mgr, ctx);
-                       return -ENOMEM;
-               }
-
-               userq_props->shadow_addr = mqd_gfx_v11->shadow_va;
-               userq_props->csa_addr = mqd_gfx_v11->csa_va;
-               kfree(mqd_gfx_v11);
-       } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
-               struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
-
-               if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
-                       DRM_ERROR("Invalid SDMA MQD\n");
-                       return -EINVAL;
-               }
-
-               mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size);
-               if (IS_ERR(mqd_sdma_v11)) {
-                       DRM_ERROR("Failed to read sdma user MQD\n");
-                       amdgpu_userqueue_destroy_object(uq_mgr, ctx);
-                       return -ENOMEM;
-               }
-
-               userq_props->csa_addr = mqd_sdma_v11->csa_va;
-               kfree(mqd_sdma_v11);
-       }
-
-       queue->userq_prop = userq_props;
-
-       r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props);
-       if (r) {
-               DRM_ERROR("Failed to initialize MQD for userqueue\n");
-               goto free_mqd;
-       }
-
-       /* Create BO for FW operations */
-       r = mes_v11_0_userq_create_ctx_space(uq_mgr, queue, mqd_user);
-       if (r) {
-               DRM_ERROR("Failed to allocate BO for userqueue (%d)", r);
-               goto free_mqd;
-       }
-
-       mes_v11_0_userq_set_fence_space(queue);
-
-       /* FW expects WPTR BOs to be mapped into GART */
-       r = mes_v11_0_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr);
-       if (r) {
-               DRM_ERROR("Failed to create WPTR mapping\n");
-               goto free_ctx;
-       }
-
-       /* Map userqueue into FW using MES */
-       r = mes_v11_0_userq_map(uq_mgr, queue, userq_props);
-       if (r) {
-               DRM_ERROR("Failed to init MQD\n");
-               goto free_ctx;
-       }
-
-       return 0;
-
-free_ctx:
-       amdgpu_userqueue_destroy_object(uq_mgr, &queue->fw_obj);
-
-free_mqd:
-       amdgpu_userqueue_destroy_object(uq_mgr, &queue->mqd);
-
-free_props:
-       kfree(userq_props);
-
-       return r;
-}
-
-static void
-mes_v11_0_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
-                           struct amdgpu_usermode_queue *queue)
-{
-       if (queue->queue_active)
-               mes_v11_0_userq_unmap(uq_mgr, queue);
-
-       amdgpu_userqueue_destroy_object(uq_mgr, &queue->fw_obj);
-       kfree(queue->userq_prop);
-       amdgpu_userqueue_destroy_object(uq_mgr, &queue->mqd);
-}
-
-static int mes_v11_0_userq_suspend(struct amdgpu_userq_mgr *uq_mgr,
-                                  struct amdgpu_usermode_queue *queue)
-{
-       if (queue->queue_active) {
-               mes_v11_0_userq_unmap(uq_mgr, queue);
-               queue->queue_active = false;
-       }
-
-       return 0;
-}
-
-static int mes_v11_0_userq_resume(struct amdgpu_userq_mgr *uq_mgr,
-                                 struct amdgpu_usermode_queue *queue)
-{
-       int ret;
-
-       if (queue->queue_active)
-               return 0;
-
-       ret = mes_v11_0_userq_map(uq_mgr, queue, queue->userq_prop);
-       if (ret) {
-               DRM_ERROR("Failed to resume queue\n");
-               return ret;
-       }
-
-       queue->queue_active = true;
-       return 0;
-}
-
-const struct amdgpu_userq_funcs userq_mes_v11_0_funcs = {
-       .mqd_create = mes_v11_0_userq_mqd_create,
-       .mqd_destroy = mes_v11_0_userq_mqd_destroy,
-       .suspend = mes_v11_0_userq_suspend,
-       .resume = mes_v11_0_userq_resume,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0_userqueue.h b/drivers/gpu/drm/amd/amdgpu/mes_v11_0_userqueue.h
deleted file mode 100644 (file)
index 2c10236..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2024 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef MES_V11_0_USERQ_H
-#define MES_V11_0_USERQ_H
-#include "amdgpu_userqueue.h"
-
-extern const struct amdgpu_userq_funcs userq_mes_v11_0_funcs;
-#endif
index fe389379e890bee5033a3115fee9f582dca1a77e..9bc3c7a35d185cf79ead72629b851ebec9f485d8 100644 (file)
@@ -43,7 +43,7 @@
 #include "sdma_common.h"
 #include "sdma_v6_0.h"
 #include "v11_structs.h"
-#include "mes_v11_0_userqueue.h"
+#include "mes_userqueue.h"
 
 MODULE_FIRMWARE("amdgpu/sdma_6_0_0.bin");
 MODULE_FIRMWARE("amdgpu/sdma_6_0_1.bin");
@@ -1381,7 +1381,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
                DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
 
 #ifdef CONFIG_DRM_AMDGPU_NAVI3X_USERQ
-       adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_v11_0_funcs;
+       adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
 #endif
        r = amdgpu_sdma_sysfs_reset_mask_init(adev);
        if (r)