drm/xe: Use atomic instead of mutex for xe_device_mem_access_ongoing
author: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Tue, 28 Feb 2023 10:17:30 +0000 (11:17 +0100)
committer: Rodrigo Vivi <rodrigo.vivi@intel.com>
Tue, 19 Dec 2023 23:30:20 +0000 (18:30 -0500)
xe_guc_ct_fast_path() is called from an irq context, and cannot lock
the mutex used by xe_device_mem_access_ongoing().

Fortunately it is easy to fix, and the atomic guarantees are good enough
to ensure xe->mem_access.hold_rpm is set before last ref is dropped.

As far as I can tell, the runtime PM reference taken in device mem access
should be removable entirely, but I don't dare to do that yet.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_device.h
drivers/gpu/drm/xe/xe_device_types.h

index 49ce11fc1174f5f1da42375eca812a3165ed3221..ffacf80c89422e077a78c900b7b75fb0469382ea 100644 (file)
@@ -206,8 +206,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
        if (err)
                goto err_put;
 
-       drmm_mutex_init(&xe->drm, &xe->mem_access.lock);
-
        return xe;
 
 err_put:
@@ -354,25 +352,25 @@ u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
 void xe_device_mem_access_get(struct xe_device *xe)
 {
        bool resumed = xe_pm_runtime_resume_if_suspended(xe);
+       int ref = atomic_inc_return(&xe->mem_access.ref);
 
-       mutex_lock(&xe->mem_access.lock);
-       if (xe->mem_access.ref++ == 0)
+       if (ref == 1)
                xe->mem_access.hold_rpm = xe_pm_runtime_get_if_active(xe);
-       mutex_unlock(&xe->mem_access.lock);
 
        /* The usage counter increased if device was immediately resumed */
        if (resumed)
                xe_pm_runtime_put(xe);
 
-       XE_WARN_ON(xe->mem_access.ref == S32_MAX);
+       XE_WARN_ON(ref == S32_MAX);
 }
 
 void xe_device_mem_access_put(struct xe_device *xe)
 {
-       mutex_lock(&xe->mem_access.lock);
-       if (--xe->mem_access.ref == 0 && xe->mem_access.hold_rpm)
+       bool hold = xe->mem_access.hold_rpm;
+       int ref = atomic_dec_return(&xe->mem_access.ref);
+
+       if (!ref && hold)
                xe_pm_runtime_put(xe);
-       mutex_unlock(&xe->mem_access.lock);
 
-       XE_WARN_ON(xe->mem_access.ref < 0);
+       XE_WARN_ON(ref < 0);
 }
index 25c5087f5aad1bfd4d979e2c02d5b711a13951d3..d277f8985f7bfb5d431ea7ba0a91d875a0ce4a18 100644 (file)
@@ -90,20 +90,14 @@ static inline struct xe_force_wake * gt_to_fw(struct xe_gt *gt)
 void xe_device_mem_access_get(struct xe_device *xe);
 void xe_device_mem_access_put(struct xe_device *xe);
 
-static inline void xe_device_assert_mem_access(struct xe_device *xe)
+static inline bool xe_device_mem_access_ongoing(struct xe_device *xe)
 {
-       XE_WARN_ON(!xe->mem_access.ref);
+       return atomic_read(&xe->mem_access.ref);
 }
 
-static inline bool xe_device_mem_access_ongoing(struct xe_device *xe)
+static inline void xe_device_assert_mem_access(struct xe_device *xe)
 {
-       bool ret;
-
-       mutex_lock(&xe->mem_access.lock);
-       ret = xe->mem_access.ref;
-       mutex_unlock(&xe->mem_access.lock);
-
-       return ret;
+       XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
 }
 
 static inline bool xe_device_in_fault_mode(struct xe_device *xe)
index 377a8979bc06db660942b432f6e8e333a298397f..3917b9152eb94e2d00f96d54f0a81b11d9e3b548 100644 (file)
@@ -184,10 +184,8 @@ struct xe_device {
         * triggering additional actions when they occur.
         */
        struct {
-               /** @lock: protect the ref count */
-               struct mutex lock;
                /** @ref: ref count of memory accesses */
-               s32 ref;
+               atomic_t ref;
                /** @hold_rpm: need to put rpm ref back at the end */
                bool hold_rpm;
        } mem_access;