return 0;
}
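
+/*
+ * Read the context status register of the given SDMA instance (GFX or page
+ * queue) and report whether its SELECTED bit is set, i.e. whether this queue
+ * is the one currently scheduled on the engine.
+ */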
+static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t instance_id, bool is_page_queue)
+{
+ uint32_t reg_offset = is_page_queue ? regSDMA_PAGE_CONTEXT_STATUS : regSDMA_GFX_CONTEXT_STATUS;
+ uint32_t context_status = RREG32(sdma_v4_4_2_get_reg_offset(adev, instance_id, reg_offset));
+
+ /*
+  * Check if the SELECTED bit is set; the GFX mask is reused for the page
+  * queue, which assumes the SELECTED bit sits at the same position in the
+  * page context status register.
+  */
+ return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
+}
+
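+/*
+ * is_guilty callback for the SDMA gfx ring: the ring is reported guilty when
+ * its queue is the currently selected context on this SDMA instance.
+ */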
+static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t instance_id = ring->me;
+
+ return sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
+}
+
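+/*
+ * is_guilty callback for the SDMA page ring; rings without a page queue can
+ * never be guilty.
+ */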
+static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t instance_id = ring->me;
+
+ if (!adev->sdma.has_page_queue)
+ return false;
+
+ return sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
+}
+
static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
{
struct amdgpu_device *adev = ring->adev;
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
.reset = sdma_v4_4_2_reset_queue,
+ .is_guilty = sdma_v4_4_2_ring_is_guilty,
};
static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .is_guilty = sdma_v4_4_2_page_ring_is_guilty,
};
static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)