gfs2: finish_xmote cleanup
authorAndreas Gruenbacher <agruenba@redhat.com>
Fri, 12 Apr 2024 17:16:58 +0000 (19:16 +0200)
committerAndreas Gruenbacher <agruenba@redhat.com>
Wed, 24 Apr 2024 17:48:20 +0000 (19:48 +0200)
Currently, function finish_xmote() takes and releases the glock
spinlock.  However, all of its callers immediately take that spinlock
again, so it makes more sense to take the spinlock before calling
finish_xmote() in the first place.

With that, thaw_glock() is the only place that sets the GLF_REPLY_PENDING
flag outside of the glock spinlock, but it also takes that spinlock
immediately thereafter.  Change that to set the bit when the spinlock is
already held.  This allows switching from test_and_clear_bit() to
test_bit() and clear_bit() in glock_work_func().

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
fs/gfs2/glock.c

index 4cf8971ce8ee02440b7ea2d5e79aab0d83223b6d..b1a2862d431d86a82b292886766ed646c469b06a 100644 (file)
@@ -625,7 +625,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;
 
-       spin_lock(&gl->gl_lockref.lock);
        trace_gfs2_glock_state_change(gl, state);
        state_change(gl, state);
        gh = find_first_waiter(gl);
@@ -673,7 +672,6 @@ retry:
                               gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
-               spin_unlock(&gl->gl_lockref.lock);
                return;
        }
 
@@ -696,7 +694,6 @@ retry:
        }
 out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
-       spin_unlock(&gl->gl_lockref.lock);
 }
 
 static bool is_system_glock(struct gfs2_glock *gl)
@@ -843,15 +840,19 @@ skip_inval:
                if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
                    target == LM_ST_UNLOCKED &&
                    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+                       spin_lock(&gl->gl_lockref.lock);
                        finish_xmote(gl, target);
-                       gfs2_glock_queue_work(gl, 0);
+                       __gfs2_glock_queue_work(gl, 0);
+                       spin_unlock(&gl->gl_lockref.lock);
                } else if (ret) {
                        fs_err(sdp, "lm_lock ret %d\n", ret);
                        GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
                }
        } else { /* lock_nolock */
+               spin_lock(&gl->gl_lockref.lock);
                finish_xmote(gl, target);
-               gfs2_glock_queue_work(gl, 0);
+               __gfs2_glock_queue_work(gl, 0);
+               spin_unlock(&gl->gl_lockref.lock);
        }
 out:
        spin_lock(&gl->gl_lockref.lock);
@@ -1108,11 +1109,12 @@ static void glock_work_func(struct work_struct *work)
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
        unsigned int drop_refs = 1;
 
-       if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+       spin_lock(&gl->gl_lockref.lock);
+       if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+               clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                finish_xmote(gl, gl->gl_reply);
                drop_refs++;
        }
-       spin_lock(&gl->gl_lockref.lock);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -2183,8 +2185,11 @@ static void thaw_glock(struct gfs2_glock *gl)
                return;
        if (!lockref_get_not_dead(&gl->gl_lockref))
                return;
+
+       spin_lock(&gl->gl_lockref.lock);
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-       gfs2_glock_queue_work(gl, 0);
+       __gfs2_glock_queue_work(gl, 0);
+       spin_unlock(&gl->gl_lockref.lock);
 }
 
 /**