if (err)
goto err_put;
- drmm_mutex_init(&xe->drm, &xe->mem_access.lock);
-
return xe;
err_put:
/**
 * xe_device_mem_access_get - Raise a reference indicating memory access is ongoing
 * @xe: xe device instance
 *
 * Wakes the device if it was runtime-suspended, then bumps the lockless
 * mem_access refcount. The first holder (ref transitions 0 -> 1) records
 * whether an extra runtime-PM reference was taken so the last
 * xe_device_mem_access_put() can release it.
 */
void xe_device_mem_access_get(struct xe_device *xe)
{
	/* Resuming (if needed) takes a temporary rpm usage-count reference. */
	bool resumed = xe_pm_runtime_resume_if_suspended(xe);
	int ref = atomic_inc_return(&xe->mem_access.ref);

	/*
	 * NOTE(review): hold_rpm is published after the ref increment, so a
	 * concurrent put() that drops ref back to 0 could read a stale
	 * hold_rpm — confirm callers serialize the first get / last put.
	 */
	if (ref == 1)
		xe->mem_access.hold_rpm = xe_pm_runtime_get_if_active(xe);

	/* The usage counter increased if device was immediately resumed */
	if (resumed)
		xe_pm_runtime_put(xe);

	XE_WARN_ON(ref == S32_MAX);
}
void xe_device_mem_access_put(struct xe_device *xe)
{
- mutex_lock(&xe->mem_access.lock);
- if (--xe->mem_access.ref == 0 && xe->mem_access.hold_rpm)
+ bool hold = xe->mem_access.hold_rpm;
+ int ref = atomic_dec_return(&xe->mem_access.ref);
+
+ if (!ref && hold)
xe_pm_runtime_put(xe);
- mutex_unlock(&xe->mem_access.lock);
- XE_WARN_ON(xe->mem_access.ref < 0);
+ XE_WARN_ON(ref < 0);
}
void xe_device_mem_access_get(struct xe_device *xe);
void xe_device_mem_access_put(struct xe_device *xe);
-static inline void xe_device_assert_mem_access(struct xe_device *xe)
+static inline bool xe_device_mem_access_ongoing(struct xe_device *xe)
{
- XE_WARN_ON(!xe->mem_access.ref);
+ return atomic_read(&xe->mem_access.ref);
}
/**
 * xe_device_assert_mem_access - Warn if called without an active mem_access ref
 * @xe: xe device instance
 *
 * Debug aid for paths that require the caller to hold a reference from
 * xe_device_mem_access_get(); fires XE_WARN_ON when the refcount is zero.
 */
static inline void xe_device_assert_mem_access(struct xe_device *xe)
{
	XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
}
static inline bool xe_device_in_fault_mode(struct xe_device *xe)
* triggering additional actions when they occur.
*/
struct {
- /** @lock: protect the ref count */
- struct mutex lock;
/** @ref: ref count of memory accesses */
- s32 ref;
+ atomic_t ref;
/** @hold_rpm: need to put rpm ref back at the end */
bool hold_rpm;
} mem_access;