fs: fall back to file_ref_put() for non-last reference
authorMateusz Guzik <mjguzik@gmail.com>
Fri, 18 Apr 2025 12:57:56 +0000 (14:57 +0200)
committerChristian Brauner <brauner@kernel.org>
Tue, 22 Apr 2025 16:16:09 +0000 (18:16 +0200)
This reduces the slowdown in the face of multiple callers issuing close on
what turns out not to be the last reference.

Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://lore.kernel.org/20250418125756.59677-1-mjguzik@gmail.com
Reviewed-by: Jan Kara <jack@suse.cz>
Reported-by: kernel test robot <oliver.sang@intel.com>
Closes: https://lore.kernel.org/oe-lkp/202504171513.6d6f8a16-lkp@intel.com
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/file.c
include/linux/file_ref.h

index dc3f7e120e3e5d51cac2567e431efa62325882e8..3a3146664cf37115624e12f7f06826d48827e9d7 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -26,7 +26,7 @@
 
 #include "internal.h"
 
-bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
+static noinline bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt)
 {
        /*
         * If the reference count was already in the dead zone, then this
index 7db62fbc0500b009059520502a441cee471fa74f..31551e4cb8f34c04dafca97a21846df58960e524 100644 (file)
@@ -61,7 +61,6 @@ static inline void file_ref_init(file_ref_t *ref, unsigned long cnt)
        atomic_long_set(&ref->refcnt, cnt - 1);
 }
 
-bool __file_ref_put_badval(file_ref_t *ref, unsigned long cnt);
 bool __file_ref_put(file_ref_t *ref, unsigned long cnt);
 
 /**
@@ -178,20 +177,14 @@ static __always_inline __must_check bool file_ref_put(file_ref_t *ref)
  */
 static __always_inline __must_check bool file_ref_put_close(file_ref_t *ref)
 {
-       long old, new;
+       long old;
 
        old = atomic_long_read(&ref->refcnt);
-       do {
-               if (unlikely(old < 0))
-                       return __file_ref_put_badval(ref, old);
-
-               if (old == FILE_REF_ONEREF)
-                       new = FILE_REF_DEAD;
-               else
-                       new = old - 1;
-       } while (!atomic_long_try_cmpxchg(&ref->refcnt, &old, new));
-
-       return new == FILE_REF_DEAD;
+       if (likely(old == FILE_REF_ONEREF)) {
+               if (likely(atomic_long_try_cmpxchg(&ref->refcnt, &old, FILE_REF_DEAD)))
+                       return true;
+       }
+       return file_ref_put(ref);
 }
 
 /**