drm/amdkfd: Fix SDMA in CPX mode
Author: Mukul Joshi <mukul.joshi@amd.com>
Tue, 31 May 2022 20:25:16 +0000 (16:25 -0400)
Committer: Alex Deucher <alexander.deucher@amd.com>
Fri, 9 Jun 2023 13:44:13 +0000 (09:44 -0400)
When creating a user-mode SDMA queue, CP FW expects
driver to use/set virtual SDMA engine id in MAP_QUEUES
packet instead of using the physical SDMA engine id.
Each partition node's virtual SDMA number should start
from 0. However, when allocating doorbell for the queue,
KFD needs to allocate the doorbell from doorbell space
corresponding to the physical SDMA engine id, otherwise
the hardware will not see the doorbell press.

Signed-off-by: Mukul Joshi <mukul.joshi@amd.com>
Reviewed-by: Amber Lin <Amber.Lin@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c

index 69419a53a14e575ff5fc4526576bfef88ef739c5..2b5c4b2dd2423aa72db60baf2ee616e477ae7f5e 100644 (file)
@@ -363,7 +363,16 @@ static int allocate_doorbell(struct qcm_process_device *qpd,
                 */
 
                uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;
-               uint32_t valid_id = idx_offset[q->properties.sdma_engine_id]
+
+               /*
+                * q->properties.sdma_engine_id corresponds to the virtual
+                * sdma engine number. However, for doorbell allocation,
+                * we need the physical sdma engine id in order to get the
+                * correct doorbell offset.
+                */
+               uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
+                                              get_num_all_sdma_engines(qpd->dqm) +
+                                              q->properties.sdma_engine_id]
                                                + (q->properties.sdma_queue_id & 1)
                                                * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
                                                + (q->properties.sdma_queue_id >> 1);
@@ -1388,7 +1397,6 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
                }
 
                q->properties.sdma_engine_id =
-                       dqm->dev->node_id * get_num_all_sdma_engines(dqm) +
                        q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
                q->properties.sdma_queue_id = q->sdma_id /
                                kfd_get_num_sdma_engines(dqm->dev);
@@ -1418,7 +1426,6 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
                 * PCIe-optimized ones
                 */
                q->properties.sdma_engine_id =
-                       dqm->dev->node_id * get_num_all_sdma_engines(dqm) +
                        kfd_get_num_sdma_engines(dqm->dev) +
                        q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
                q->properties.sdma_queue_id = q->sdma_id /
@@ -2486,6 +2493,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
        int pipe, queue;
        int r = 0, xcc;
        uint32_t inst;
+       uint32_t sdma_engine_start;
 
        if (!dqm->sched_running) {
                seq_puts(m, " Device is stopped\n");
@@ -2530,7 +2538,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
                }
        }
 
-       for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
+       sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
+       for (pipe = sdma_engine_start;
+            pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm));
+            pipe++) {
                for (queue = 0;
                     queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
                     queue++) {