drm/xe: Convert mem_access_if_ongoing to direct xe_pm_runtime_get_if_active
author	Rodrigo Vivi <rodrigo.vivi@intel.com>
Wed, 17 Apr 2024 20:39:50 +0000 (16:39 -0400)
committer	Rodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 18 Apr 2024 12:31:40 +0000 (08:31 -0400)
Now that assert_mem_access relies directly on the pm_runtime state
instead of the counters, there is no reason not to use the pm_runtime
functions directly.
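
For illustration only, a minimal sketch of the resulting pattern in the
G2H paths, assuming xe_pm_runtime_get_if_active() returns true only when
a runtime PM reference was actually taken (the process_g2h() helper is
hypothetical; the rest follows the diff below):

	static void g2h_sketch(struct xe_guc_ct *ct)
	{
		struct xe_device *xe = ct_to_xe(ct);
		bool ongoing;

		/* Take a runtime PM reference only if the device is already active. */
		ongoing = xe_pm_runtime_get_if_active(xe);

		/* Otherwise only proceed when called from within a PM callback. */
		if (!ongoing && xe_pm_read_callback_task(xe) == NULL)
			return;

		process_g2h(ct);	/* hypothetical: drain pending G2H messages */

		/* Drop the reference only if we took one above. */
		if (ongoing)
			xe_pm_runtime_put(xe);
	}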

Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240417203952.25503-8-rodrigo.vivi@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_device.h
drivers/gpu/drm/xe/xe_guc_ct.c

diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 1f8f9018b27c9fbb0142022869aec4b7ec57aad6..47a4bb0b0a7b99be8aa72641c149c4fecc35f4ed 100644
@@ -733,23 +733,6 @@ void xe_device_assert_mem_access(struct xe_device *xe)
        xe_assert(xe, !xe_pm_runtime_suspended(xe));
 }
 
-bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe)
-{
-       bool active;
-
-       if (xe_pm_read_callback_task(xe) == current)
-               return true;
-
-       active = xe_pm_runtime_get_if_active(xe);
-       if (active) {
-               int ref = atomic_inc_return(&xe->mem_access.ref);
-
-               xe_assert(xe, ref != S32_MAX);
-       }
-
-       return active;
-}
-
 void xe_device_mem_access_get(struct xe_device *xe)
 {
        int ref;
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 39921666e1f12dbcf0980f33f4b6cae6c0ad1320..54490802e97b7bdee74794681024c9b7ab46c615 100644
@@ -134,7 +134,6 @@ static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
 }
 
 void xe_device_mem_access_get(struct xe_device *xe);
-bool xe_device_mem_access_get_if_ongoing(struct xe_device *xe);
 void xe_device_mem_access_put(struct xe_device *xe);
 
 void xe_device_assert_mem_access(struct xe_device *xe);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index b1412d432ec2d3318ca016c9b21657360a1d3014..ac9324338ccfb66cedc54f8d761606091d4ca4e1 100644
@@ -1210,7 +1210,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
        bool ongoing;
        int len;
 
-       ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+       ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
        if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
                return;
 
@@ -1223,7 +1223,7 @@ void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
        spin_unlock(&ct->fast_lock);
 
        if (ongoing)
-               xe_device_mem_access_put(xe);
+               xe_pm_runtime_put(xe);
 }
 
 /* Returns less than zero on error, 0 on done, 1 on more available */
@@ -1281,7 +1281,7 @@ static void g2h_worker_func(struct work_struct *w)
         * responses, if the worker here is blocked on those callbacks
         * completing, creating a deadlock.
         */
-       ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
+       ongoing = xe_pm_runtime_get_if_active(ct_to_xe(ct));
        if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
                return;
 
@@ -1299,7 +1299,7 @@ static void g2h_worker_func(struct work_struct *w)
        } while (ret == 1);
 
        if (ongoing)
-               xe_device_mem_access_put(ct_to_xe(ct));
+               xe_pm_runtime_put(ct_to_xe(ct));
 }
 
 static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,