drm/xe: Kill xe_device_mem_access_{get,put}
author     Rodrigo Vivi <rodrigo.vivi@intel.com>
           Thu, 18 Apr 2024 14:30:49 +0000 (10:30 -0400)
committer  Rodrigo Vivi <rodrigo.vivi@intel.com>
           Mon, 22 Apr 2024 13:03:09 +0000 (09:03 -0400)
Let's simply convert all the current callers to direct xe_pm_runtime
calls and remove this extra layer of indirection.

No functional change is expected with this patch, since
xe_device_mem_access_get was already using xe_pm_runtime_get_noresume
at this point.
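
For reference, the call-site conversion follows this pattern (a minimal
sketch with a hypothetical caller for illustration; it is not code taken
from this patch):

  #include "xe_device.h"
  #include "xe_pm.h"

  /* Grab the runtime PM reference directly (without forcing a resume,
   * which is exactly what the old helper did internally), do the work
   * that needs the device awake, then drop the reference.
   */
  static void example_mem_access(struct xe_device *xe)
  {
          xe_pm_runtime_get_noresume(xe); /* was: xe_device_mem_access_get(xe); */

          xe_device_assert_mem_access(xe);
          /* ... touch VRAM / GGTT / submit work here ... */

          xe_pm_runtime_put(xe);          /* was: xe_device_mem_access_put(xe); */
  }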

v2: Convert all the current callers instead of a big refactor
at once.

v3: - Rebased
    - Squashed in the GSC/HDCP caller conversion
    - Added a new case: sriov_pf_policy
    - Improved commit message to highlight that
      there's no functional change in this patch.

Reviewed-by: Matthew Auld <matthew.auld@intel.com> #v2
Cc: Suraj Kandpal <suraj.kandpal@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240418143049.43231-1-rodrigo.vivi@intel.com
drivers/gpu/drm/xe/display/xe_fb_pin.c
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
drivers/gpu/drm/xe/xe_bo.c
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_device.h
drivers/gpu/drm/xe/xe_device_types.h
drivers/gpu/drm/xe/xe_exec_queue.c
drivers/gpu/drm/xe/xe_ggtt.c
drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
drivers/gpu/drm/xe/xe_sched_job.c
drivers/gpu/drm/xe/xe_vm.c

diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 3a584bc3a0a30cc4d4b456637bb8ef19aacefb60..3e1ae37c4c8b16d8895a53ca73c69f851fa1c9c7 100644
@@ -10,6 +10,7 @@
 #include "intel_fb_pin.h"
 #include "xe_ggtt.h"
 #include "xe_gt.h"
+#include "xe_pm.h"
 
 #include <drm/ttm/ttm_bo.h>
 
@@ -193,7 +194,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
        /* TODO: Consider sharing framebuffer mapping?
         * embed i915_vma inside intel_framebuffer
         */
-       xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+       xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
        ret = mutex_lock_interruptible(&ggtt->lock);
        if (ret)
                goto out;
@@ -244,7 +245,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
 out_unlock:
        mutex_unlock(&ggtt->lock);
 out:
-       xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+       xe_pm_runtime_put(tile_to_xe(ggtt->tile));
        return ret;
 }
 
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 264b957f3639d87bc7fe7872f8efc4aabc0ef920..d46f87a039f2091bc0769e8eb7f19bd0c2bb7c51 100644
@@ -215,7 +215,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
        addr_out_off = PAGE_SIZE;
 
        host_session_id = xe_gsc_create_host_session_id();
-       xe_device_mem_access_get(xe);
+       xe_pm_runtime_get_noresume(xe);
        addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
                                            addr_in_wr_off, HECI_MEADDRESS_HDCP,
                                            host_session_id, msg_in_len);
@@ -247,6 +247,6 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
                           msg_out_len);
 
 out:
-       xe_device_mem_access_put(xe);
+       xe_pm_runtime_put(xe);
        return ret;
 }
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 9889adcc458bb9be71e196e4bf1efd465b33d34d..bc1f794e3e614e70aca3345cfddf1a44b72152fb 100644
@@ -716,7 +716,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 
        xe_assert(xe, migrate);
        trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
-       xe_device_mem_access_get(xe);
+       xe_pm_runtime_get_noresume(xe);
 
        if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
                /*
@@ -740,7 +740,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 
                                if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
                                        ret = -EINVAL;
-                                       xe_device_mem_access_put(xe);
+                                       xe_pm_runtime_put(xe);
                                        goto out;
                                }
 
@@ -758,7 +758,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
                                                new_mem, handle_system_ccs);
                if (IS_ERR(fence)) {
                        ret = PTR_ERR(fence);
-                       xe_device_mem_access_put(xe);
+                       xe_pm_runtime_put(xe);
                        goto out;
                }
                if (!move_lacks_source) {
@@ -783,7 +783,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
                dma_fence_put(fence);
        }
 
-       xe_device_mem_access_put(xe);
+       xe_pm_runtime_put(xe);
 
 out:
        return ret;
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 3b082059483664824bff58b2370d03192b7554a9..55bbc8b8df159ae3667df8c82c75eaca4bad031d 100644
@@ -729,42 +729,6 @@ void xe_device_assert_mem_access(struct xe_device *xe)
        xe_assert(xe, !xe_pm_runtime_suspended(xe));
 }
 
-void xe_device_mem_access_get(struct xe_device *xe)
-{
-       int ref;
-
-       /*
-        * This looks racy, but should be fine since the pm_callback_task only
-        * transitions from NULL -> current (and back to NULL again), during the
-        * runtime_resume() or runtime_suspend() callbacks, for which there can
-        * only be a single one running for our device. We only need to prevent
-        * recursively calling the runtime_get or runtime_put from those
-        * callbacks, as well as preventing triggering any access_ongoing
-        * asserts.
-        */
-       if (xe_pm_read_callback_task(xe) == current)
-               return;
-
-       xe_pm_runtime_get_noresume(xe);
-       ref = atomic_inc_return(&xe->mem_access.ref);
-
-       xe_assert(xe, ref != S32_MAX);
-
-}
-
-void xe_device_mem_access_put(struct xe_device *xe)
-{
-       int ref;
-
-       if (xe_pm_read_callback_task(xe) == current)
-               return;
-
-       ref = atomic_dec_return(&xe->mem_access.ref);
-       xe_pm_runtime_put(xe);
-
-       xe_assert(xe, ref >= 0);
-}
-
 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
 {
        struct xe_gt *gt;
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 54490802e97b7bdee74794681024c9b7ab46c615..36d4434ebcccb5c800267e6c92f34d0f97f1ab1c 100644
@@ -133,9 +133,6 @@ static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
        return &gt->mmio.fw;
 }
 
-void xe_device_mem_access_get(struct xe_device *xe);
-void xe_device_mem_access_put(struct xe_device *xe);
-
 void xe_device_assert_mem_access(struct xe_device *xe);
 
 static inline bool xe_device_in_fault_mode(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 8244b177a6a3efbad27fc5e1d6e41debe070c8b3..8a9f12a8d7c171c106f02d878cff9881b79a0be1 100644
@@ -384,9 +384,6 @@ struct xe_device {
         * triggering additional actions when they occur.
         */
        struct {
-               /** @mem_access.ref: ref count of memory accesses */
-               atomic_t ref;
-
                /**
                 * @mem_access.vram_userfault: Encapsulate vram_userfault
                 * related stuff
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 50ec661116a2fca8923d76e8525f5a0f83213026..395de93579fa639875af31adaaac66ca55aa99cc 100644
@@ -589,7 +589,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                                return -EINVAL;
 
                        /* The migration vm doesn't hold rpm ref */
-                       xe_device_mem_access_get(xe);
+                       xe_pm_runtime_get_noresume(xe);
 
                        flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
 
@@ -598,7 +598,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
                                                   args->width, hwe, flags,
                                                   args->extensions);
 
-                       xe_device_mem_access_put(xe); /* now held by engine */
+                       xe_pm_runtime_put(xe); /* now held by engine */
 
                        xe_vm_put(migrate_vm);
                        if (IS_ERR(new)) {
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 38f6c94c722dd0ed578427f7e9cb96dea9a205e8..0d541f55b4fcf9701a0ff2a88c18a3de0e90d5d4 100644
@@ -21,6 +21,7 @@
 #include "xe_gt_printk.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_map.h"
+#include "xe_pm.h"
 #include "xe_sriov.h"
 #include "xe_wopcm.h"
 
@@ -403,7 +404,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
        if (err)
                return err;
 
-       xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+       xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
        mutex_lock(&ggtt->lock);
        err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
                                          alignment, 0, start, end, 0);
@@ -413,7 +414,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
 
        if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
                xe_ggtt_invalidate(ggtt);
-       xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+       xe_pm_runtime_put(tile_to_xe(ggtt->tile));
 
        return err;
 }
@@ -432,7 +433,7 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
 void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
                         bool invalidate)
 {
-       xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+       xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
 
        mutex_lock(&ggtt->lock);
        xe_ggtt_clear(ggtt, node->start, node->size);
@@ -443,7 +444,7 @@ void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
        if (invalidate)
                xe_ggtt_invalidate(ggtt);
 
-       xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+       xe_pm_runtime_put(tile_to_xe(ggtt->tile));
 }
 
 void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_policy.c
index 3eaa17ca54fcea6c7065a26d86c058b49929f1d8..fae5be5a2a11d97b7f9a0e442227243c224f936b 100644
@@ -12,6 +12,7 @@
 #include "xe_gt_sriov_printk.h"
 #include "xe_guc_ct.h"
 #include "xe_guc_klv_helpers.h"
+#include "xe_pm.h"
 
 /*
  * Return: number of KLVs that were successfully parsed and saved,
@@ -368,7 +369,7 @@ int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset)
 {
        int err = 0;
 
-       xe_device_mem_access_get(gt_to_xe(gt));
+       xe_pm_runtime_get_noresume(gt_to_xe(gt));
 
        mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
        if (reset)
@@ -378,7 +379,7 @@ int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset)
        err |= pf_reprovision_sample_period(gt);
        mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
 
-       xe_device_mem_access_put(gt_to_xe(gt));
+       xe_pm_runtime_put(gt_to_xe(gt));
 
        return err ? -ENXIO : 0;
 }
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 80daee910ae95e6d1ec37ed8bdd40ee6f4aaedbb..cd8a2fba543894a939a54aee4b067cd7859d61b0 100644
@@ -16,6 +16,7 @@
 #include "xe_hw_fence.h"
 #include "xe_lrc.h"
 #include "xe_macros.h"
+#include "xe_pm.h"
 #include "xe_sync_types.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
@@ -159,7 +160,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
 
        /* All other jobs require a VM to be open which has a ref */
        if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
-               xe_device_mem_access_get(job_to_xe(job));
+               xe_pm_runtime_get_noresume(job_to_xe(job));
        xe_device_assert_mem_access(job_to_xe(job));
 
        trace_xe_sched_job_create(job);
@@ -192,7 +193,7 @@ void xe_sched_job_destroy(struct kref *ref)
                container_of(ref, struct xe_sched_job, refcount);
 
        if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
-               xe_device_mem_access_put(job_to_xe(job));
+               xe_pm_runtime_put(job_to_xe(job));
        xe_exec_queue_put(job->q);
        dma_fence_put(job->fence);
        drm_sched_job_cleanup(&job->drm);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 8a858b8588bd0fe0ce29f0c8c0e8017d60ec0386..85d6f359142df31fce6c9208e587c7ce09ec061a 100644
@@ -1266,7 +1266,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
        vm->pt_ops = &xelp_pt_ops;
 
        if (!(flags & XE_VM_FLAG_MIGRATION))
-               xe_device_mem_access_get(xe);
+               xe_pm_runtime_get_noresume(xe);
 
        vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
        if (!vm_resv_obj) {
@@ -1376,7 +1376,7 @@ err_no_resv:
                xe_range_fence_tree_fini(&vm->rftree[id]);
        kfree(vm);
        if (!(flags & XE_VM_FLAG_MIGRATION))
-               xe_device_mem_access_put(xe);
+               xe_pm_runtime_put(xe);
        return ERR_PTR(err);
 }
 
@@ -1507,7 +1507,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
        mutex_destroy(&vm->snap_mutex);
 
        if (!(vm->flags & XE_VM_FLAG_MIGRATION))
-               xe_device_mem_access_put(xe);
+               xe_pm_runtime_put(xe);
 
        for_each_tile(tile, xe, id)
                XE_WARN_ON(vm->pt_root[id]);