locking/lock_events: Add locking events for rtmutex slow paths
author		Waiman Long <longman@redhat.com>
		Fri, 7 Mar 2025 23:26:53 +0000 (15:26 -0800)
committer	Ingo Molnar <mingo@kernel.org>
		Fri, 7 Mar 2025 23:55:03 +0000 (00:55 +0100)
Add locking events to rtlock_slowlock() and rt_mutex_slowlock() to
profile the slow-path behavior of rt_spin_lock() and rt_mutex_lock().
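
When CONFIG_LOCK_EVENT_COUNTS=y, each event is exported as a file
under <debugfs>/lockevent holding a single count summed across CPUs.
As a minimal userspace sketch (assuming debugfs is mounted at
/sys/kernel/debug; the event name is one of those added here):

    /* Read one lock event counter from debugfs. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long count;
            FILE *f = fopen("/sys/kernel/debug/lockevent/rtmutex_slowlock", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%llu", &count) == 1)
                    printf("rtmutex_slowlock: %llu\n", count);
            fclose(f);
            return 0;
    }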

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20250307232717.1759087-4-boqun.feng@gmail.com
kernel/locking/lock_events_list.h
kernel/locking/rtmutex.c

index 97fb6f3f840aa7e5d5a0baca616a913f397548ff..80b11f194c9f7de749a7fdc64aaa4a5fc8fd8e3e 100644 (file)
@@ -67,3 +67,24 @@ LOCK_EVENT(rwsem_rlock_handoff)      /* # of read lock handoffs              */
 LOCK_EVENT(rwsem_wlock)                /* # of write locks acquired            */
 LOCK_EVENT(rwsem_wlock_fail)   /* # of failed write lock acquisitions  */
 LOCK_EVENT(rwsem_wlock_handoff)        /* # of write lock handoffs             */
+
+/*
+ * Locking events for rtlock_slowlock()
+ */
+LOCK_EVENT(rtlock_slowlock)    /* # of rtlock_slowlock() calls         */
+LOCK_EVENT(rtlock_slow_acq1)   /* # of locks acquired after wait_lock  */
+LOCK_EVENT(rtlock_slow_acq2)   /* # of locks acquired in for loop      */
+LOCK_EVENT(rtlock_slow_sleep)  /* # of sleeps                          */
+LOCK_EVENT(rtlock_slow_wake)   /* # of wakeups                         */
+
+/*
+ * Locking events for rt_mutex_slowlock()
+ */
+LOCK_EVENT(rtmutex_slowlock)   /* # of rt_mutex_slowlock() calls       */
+LOCK_EVENT(rtmutex_slow_block) /* # of rt_mutex_slowlock_block() calls */
+LOCK_EVENT(rtmutex_slow_acq1)  /* # of locks acquired after wait_lock  */
+LOCK_EVENT(rtmutex_slow_acq2)  /* # of locks acquired at the end       */
+LOCK_EVENT(rtmutex_slow_acq3)  /* # of locks acquired in *block()      */
+LOCK_EVENT(rtmutex_slow_sleep) /* # of sleeps                          */
+LOCK_EVENT(rtmutex_slow_wake)  /* # of wakeups                         */
+LOCK_EVENT(rtmutex_deadlock)   /* # of rt_mutex_handle_deadlock() calls */
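
For reference, lock_events_list.h is meant to be included multiple
times with different LOCK_EVENT() definitions: once to build an enum
of event IDs and once to build the name strings shown in debugfs. An
abridged sketch of the consumer side, mirroring (with details
simplified) kernel/locking/lock_events.h:

    /* Default expansion: each list entry becomes an enum ID. */
    #ifndef LOCK_EVENT
    #define LOCK_EVENT(name)        LOCKEVENT_ ## name,
    #endif

    enum lock_events {
    #include "lock_events_list.h"
            lockevent_num,          /* total number of lock events */
    };

    /* One per-CPU counter slot per event; lockevent_inc() is a
     * cheap per-CPU increment on the matching slot. */
    DECLARE_PER_CPU(unsigned long, lockevents[lockevent_num]);

    #define lockevent_inc(ev)       this_cpu_inc(lockevents[LOCKEVENT_ ## ev])
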
index 4a8df1800cbbd1aef0a1ac96c426d57533f7fbe7..c80902eacd797c669dedcf10966a8cff38524b50 100644 (file)
@@ -27,6 +27,7 @@
 #include <trace/events/lock.h>
 
 #include "rtmutex_common.h"
+#include "lock_events.h"
 
 #ifndef WW_RT
 # define build_ww_mutex()      (false)
@@ -1612,10 +1613,13 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
        struct task_struct *owner;
        int ret = 0;
 
+       lockevent_inc(rtmutex_slow_block);
        for (;;) {
                /* Try to acquire the lock: */
-               if (try_to_take_rt_mutex(lock, current, waiter))
+               if (try_to_take_rt_mutex(lock, current, waiter)) {
+                       lockevent_inc(rtmutex_slow_acq3);
                        break;
+               }
 
                if (timeout && !timeout->task) {
                        ret = -ETIMEDOUT;
@@ -1638,8 +1642,10 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
                        owner = NULL;
                raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-               if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+               if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner)) {
+                       lockevent_inc(rtmutex_slow_sleep);
                        rt_mutex_schedule();
+               }
 
                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
@@ -1694,6 +1700,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
        int ret;
 
        lockdep_assert_held(&lock->wait_lock);
+       lockevent_inc(rtmutex_slowlock);
 
        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -1701,6 +1708,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
                        __ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
                        ww_mutex_lock_acquired(ww, ww_ctx);
                }
+               lockevent_inc(rtmutex_slow_acq1);
                return 0;
        }
 
@@ -1719,10 +1727,12 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
                                __ww_mutex_check_waiters(rtm, ww_ctx, wake_q);
                        ww_mutex_lock_acquired(ww, ww_ctx);
                }
+               lockevent_inc(rtmutex_slow_acq2);
        } else {
                __set_current_state(TASK_RUNNING);
                remove_waiter(lock, waiter);
                rt_mutex_handle_deadlock(ret, chwalk, lock, waiter);
+               lockevent_inc(rtmutex_deadlock);
        }
 
        /*
@@ -1751,6 +1761,7 @@ static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
                                  &waiter, wake_q);
 
        debug_rt_mutex_free_waiter(&waiter);
+       lockevent_cond_inc(rtmutex_slow_wake, !wake_q_empty(wake_q));
        return ret;
 }
 
@@ -1823,9 +1834,12 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
        struct task_struct *owner;
 
        lockdep_assert_held(&lock->wait_lock);
+       lockevent_inc(rtlock_slowlock);
 
-       if (try_to_take_rt_mutex(lock, current, NULL))
+       if (try_to_take_rt_mutex(lock, current, NULL)) {
+               lockevent_inc(rtlock_slow_acq1);
                return;
+       }
 
        rt_mutex_init_rtlock_waiter(&waiter);
 
@@ -1838,8 +1852,10 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
 
        for (;;) {
                /* Try to acquire the lock again */
-               if (try_to_take_rt_mutex(lock, current, &waiter))
+               if (try_to_take_rt_mutex(lock, current, &waiter)) {
+                       lockevent_inc(rtlock_slow_acq2);
                        break;
+               }
 
                if (&waiter == rt_mutex_top_waiter(lock))
                        owner = rt_mutex_owner(lock);
@@ -1847,8 +1863,10 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
                        owner = NULL;
                raw_spin_unlock_irq_wake(&lock->wait_lock, wake_q);
 
-               if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
+               if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner)) {
+                       lockevent_inc(rtlock_slow_sleep);
                        schedule_rtlock();
+               }
 
                raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(TASK_RTLOCK_WAIT);
@@ -1865,6 +1883,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock,
        debug_rt_mutex_free_waiter(&waiter);
 
        trace_contention_end(lock, 0);
+       lockevent_cond_inc(rtlock_slow_wake, !wake_q_empty(wake_q));
 }
 
 static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
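
Note that the two *_slow_wake events are bumped with
lockevent_cond_inc() rather than lockevent_inc(), so they only count
slow-path exits where the wake_q still carries a pending wakeup. A
sketch of that helper, assumed to mirror kernel/locking/lock_events.h:

    /* Count the event only when the condition holds; the whole
     * family compiles away when CONFIG_LOCK_EVENT_COUNTS=n. */
    #define lockevent_cond_inc(ev, cond)                    \
    do {                                                    \
            if (cond)                                       \
                    lockevent_inc(ev);                      \
    } while (0)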