btrfs: remove inode_lock from struct btrfs_root and use xarray locks
author Filipe Manana <fdmanana@suse.com>
Mon, 6 May 2024 12:27:29 +0000 (13:27 +0100)
committer David Sterba <dsterba@suse.com>
Thu, 11 Jul 2024 13:33:17 +0000 (15:33 +0200)
Currently we use the spinlock inode_lock from struct btrfs_root to
serialize access to two different data structures:

1) The delayed inodes xarray (struct btrfs_root::delayed_nodes);
2) The inodes xarray (struct btrfs_root::inodes).

Instead of using our own lock, we can use the spinlock that is part of the
xarray implementation: take it through the xa_lock() and xa_unlock() APIs
and use the xarray APIs with the double underscore prefix, which do not
take the xarray's lock and assume the caller is holding it.
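
For illustration, the pattern looks roughly like the following sketch,
with hypothetical names (my_nodes, store_if_absent(), remove_entry())
rather than code from this patch:

  #include <linux/errno.h>
  #include <linux/xarray.h>

  static DEFINE_XARRAY(my_nodes);

  /*
   * Make a check-and-store atomic by holding the xarray's internal
   * spinlock across both operations.  The double underscore variants
   * (__xa_store(), __xa_erase(), ...) skip the internal locking, so
   * any allocation done while the lock is held must use GFP_ATOMIC.
   */
  static int store_if_absent(unsigned long index, void *entry)
  {
          void *old;

          xa_lock(&my_nodes);
          if (xa_load(&my_nodes, index)) {
                  xa_unlock(&my_nodes);
                  return -EEXIST;
          }
          old = __xa_store(&my_nodes, index, entry, GFP_ATOMIC);
          xa_unlock(&my_nodes);

          return xa_err(old);
  }

  /*
   * A standalone operation can use the plain API, which takes and
   * releases the xarray lock internally.
   */
  static void remove_entry(unsigned long index)
  {
          xa_erase(&my_nodes, index);
  }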

So remove the spinlock inode_lock from struct btrfs_root and use the
corresponding xarray locks. This brings two benefits:

1) We reduce the size of struct btrfs_root, from 1336 bytes down to
   1328 bytes on a 64-bit release kernel config;

2) We reduce lock contention by no longer using the same lock to change
   two different and unrelated xarrays.
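
Note that struct xarray exposes its spinlock as the xa_lock member, so
code that must drop the lock at rescheduling points can pass it to
cond_resched_lock(), as btrfs_find_first_inode() does below with
&root->inodes.xa_lock. Roughly, that shape is the following sketch,
with hypothetical names (my_items, walk_items()) rather than code from
this patch:

  #include <linux/limits.h>
  #include <linux/sched.h>
  #include <linux/xarray.h>

  static DEFINE_XARRAY(my_items);

  /*
   * Visit every present entry while holding the xarray lock, yielding
   * the CPU (and the lock) between iterations when rescheduling is
   * needed.  cond_resched_lock() works on any spinlock, including the
   * xarray's embedded xa_lock.
   */
  static void walk_items(void)
  {
          unsigned long index = 0;
          void *entry;

          xa_lock(&my_items);
          while ((entry = xa_find(&my_items, &index, ULONG_MAX, XA_PRESENT))) {
                  /* ... process entry ... */
                  index++;
                  cond_resched_lock(&my_items.xa_lock);
          }
          xa_unlock(&my_items);
  }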

Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/inode.c

index aa2568f86dc9fdacc0bc02c557bcad55ae87e8b5..1004cb934b4ac7105ead247ae71ec8223465826d 100644
@@ -221,7 +221,6 @@ struct btrfs_root {
 
        struct list_head root_list;
 
-       spinlock_t inode_lock;
        /*
-        * Xarray that keeps track of in-memory inodes, protected by the lock
-        * @inode_lock.
+        * Xarray that keeps track of in-memory inodes, protected by the
+        * xarray lock.
index 95a0497fa86692d764eea3179eb3c25927bbfa84..40e617c7e8a1333828aa2b4d1f27a3c8a09c6945 100644
@@ -77,14 +77,14 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
                return node;
        }
 
-       spin_lock(&root->inode_lock);
+       xa_lock(&root->delayed_nodes);
        node = xa_load(&root->delayed_nodes, ino);
 
        if (node) {
                if (btrfs_inode->delayed_node) {
                        refcount_inc(&node->refs);      /* can be accessed */
                        BUG_ON(btrfs_inode->delayed_node != node);
-                       spin_unlock(&root->inode_lock);
+                       xa_unlock(&root->delayed_nodes);
                        return node;
                }
 
@@ -111,10 +111,10 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
                        node = NULL;
                }
 
-               spin_unlock(&root->inode_lock);
+               xa_unlock(&root->delayed_nodes);
                return node;
        }
-       spin_unlock(&root->inode_lock);
+       xa_unlock(&root->delayed_nodes);
 
        return NULL;
 }
@@ -148,21 +148,21 @@ again:
                kmem_cache_free(delayed_node_cache, node);
                return ERR_PTR(-ENOMEM);
        }
-       spin_lock(&root->inode_lock);
+       xa_lock(&root->delayed_nodes);
        ptr = xa_load(&root->delayed_nodes, ino);
        if (ptr) {
                /* Somebody inserted it, go back and read it. */
-               spin_unlock(&root->inode_lock);
+               xa_unlock(&root->delayed_nodes);
                kmem_cache_free(delayed_node_cache, node);
                node = NULL;
                goto again;
        }
-       ptr = xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
+       ptr = __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
        ASSERT(xa_err(ptr) != -EINVAL);
        ASSERT(xa_err(ptr) != -ENOMEM);
        ASSERT(ptr == NULL);
        btrfs_inode->delayed_node = node;
-       spin_unlock(&root->inode_lock);
+       xa_unlock(&root->delayed_nodes);
 
        return node;
 }
@@ -275,14 +275,12 @@ static void __btrfs_release_delayed_node(
        if (refcount_dec_and_test(&delayed_node->refs)) {
                struct btrfs_root *root = delayed_node->root;
 
-               spin_lock(&root->inode_lock);
+               xa_erase(&root->delayed_nodes, delayed_node->inode_id);
                /*
                 * Once our refcount goes to zero, nobody is allowed to bump it
                 * back up.  We can delete it now.
                 */
                ASSERT(refcount_read(&delayed_node->refs) == 0);
-               xa_erase(&root->delayed_nodes, delayed_node->inode_id);
-               spin_unlock(&root->inode_lock);
                kmem_cache_free(delayed_node_cache, delayed_node);
        }
 }
@@ -2057,9 +2055,9 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
                struct btrfs_delayed_node *node;
                int count;
 
-               spin_lock(&root->inode_lock);
+               xa_lock(&root->delayed_nodes);
                if (xa_empty(&root->delayed_nodes)) {
-                       spin_unlock(&root->inode_lock);
+                       xa_unlock(&root->delayed_nodes);
                        return;
                }
 
@@ -2076,7 +2074,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
                        if (count >= ARRAY_SIZE(delayed_nodes))
                                break;
                }
-               spin_unlock(&root->inode_lock);
+               xa_unlock(&root->delayed_nodes);
                index++;
 
                for (int i = 0; i < count; i++) {
index aa85be9661cc4fa193bb3885d918f7e26ae1b108..886ff6a72a9b05576869d6c975dadb2b2ea1cac9 100644
@@ -674,7 +674,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        INIT_LIST_HEAD(&root->ordered_extents);
        INIT_LIST_HEAD(&root->ordered_root);
        INIT_LIST_HEAD(&root->reloc_dirty_list);
-       spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->delalloc_lock);
        spin_lock_init(&root->ordered_extent_lock);
        spin_lock_init(&root->accounting_lock);
index e05915133fd0c9cd4f1e12e42f7f361fe053c2d8..2a8bc014579ef2af86026bec9444924ea1856b6b 100644
@@ -5509,9 +5509,7 @@ static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
                        return ret;
        }
 
-       spin_lock(&root->inode_lock);
        existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
-       spin_unlock(&root->inode_lock);
 
        if (xa_is_err(existing)) {
                ret = xa_err(existing);
@@ -5531,16 +5529,16 @@ static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
        struct btrfs_inode *entry;
        bool empty = false;
 
-       spin_lock(&root->inode_lock);
-       entry = xa_erase(&root->inodes, btrfs_ino(inode));
+       xa_lock(&root->inodes);
+       entry = __xa_erase(&root->inodes, btrfs_ino(inode));
        if (entry == inode)
                empty = xa_empty(&root->inodes);
-       spin_unlock(&root->inode_lock);
+       xa_unlock(&root->inodes);
 
        if (empty && btrfs_root_refs(&root->root_item) == 0) {
-               spin_lock(&root->inode_lock);
+               xa_lock(&root->inodes);
                empty = xa_empty(&root->inodes);
-               spin_unlock(&root->inode_lock);
+               xa_unlock(&root->inodes);
                if (empty)
                        btrfs_add_dead_root(root);
        }
@@ -10874,7 +10872,7 @@ struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
        struct btrfs_inode *inode;
        unsigned long from = min_ino;
 
-       spin_lock(&root->inode_lock);
+       xa_lock(&root->inodes);
        while (true) {
                inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
                if (!inode)
@@ -10883,9 +10881,9 @@ struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
                        break;
 
                from = btrfs_ino(inode) + 1;
-               cond_resched_lock(&root->inode_lock);
+               cond_resched_lock(&root->inodes.xa_lock);
        }
-       spin_unlock(&root->inode_lock);
+       xa_unlock(&root->inodes);
 
        return inode;
 }