btrfs: move btrfs_set_path_blocking to other locking functions
author David Sterba <dsterba@suse.com>
Tue, 24 Sep 2019 17:17:17 +0000 (19:17 +0200)
committer David Sterba <dsterba@suse.com>
Mon, 18 Nov 2019 11:46:49 +0000 (12:46 +0100)
The function belongs to the family of locking functions, so move it from
ctree.c to locking.c. The 'noinline' keyword is dropped, as the function
is now exported and no longer needs it.
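
For illustration only (not part of this change), a typical caller converts
the spinning locks taken during a tree search to blocking locks before doing
anything that may sleep, roughly:

	struct btrfs_path *path = btrfs_alloc_path();

	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	/*
	 * The search left spinning locks on the path nodes; make them
	 * blocking before an operation that may schedule.
	 */
	btrfs_set_path_blocking(path);
	/* ... code that may sleep, e.g. a GFP_NOFS allocation ... */
	btrfs_free_path(path);

This sketch only shows where in a caller's flow btrfs_set_path_blocking()
is expected; the surrounding helpers (btrfs_search_slot, btrfs_alloc_path,
btrfs_free_path) are the usual btrfs path API and not touched by this patch.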

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ctree.c
fs/btrfs/locking.c
fs/btrfs/locking.h

fs/btrfs/ctree.c
index 0231141de289f98515e69f00644fd7895ad7eb95..a55d55e5c913eba2b62c4fa860694f5caaa4c71a 100644 (file)
@@ -56,31 +56,6 @@ struct btrfs_path *btrfs_alloc_path(void)
        return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
 }
 
-/*
- * set all locked nodes in the path to blocking locks.  This should
- * be done before scheduling
- */
-noinline void btrfs_set_path_blocking(struct btrfs_path *p)
-{
-       int i;
-       for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
-               if (!p->nodes[i] || !p->locks[i])
-                       continue;
-               /*
-                * If we currently have a spinning reader or writer lock this
-                * will bump the count of blocking holders and drop the
-                * spinlock.
-                */
-               if (p->locks[i] == BTRFS_READ_LOCK) {
-                       btrfs_set_lock_blocking_read(p->nodes[i]);
-                       p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
-               } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
-                       btrfs_set_lock_blocking_write(p->nodes[i]);
-                       p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
-               }
-       }
-}
-
 /* this also releases the path */
 void btrfs_free_path(struct btrfs_path *p)
 {
fs/btrfs/locking.c
index 028513153ac4e5738d8e2216b33dff284e0aa272..f58606887859b70c18aa234377d8fa743ff94297 100644 (file)
@@ -316,3 +316,29 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
                write_unlock(&eb->lock);
        }
 }
+
+/*
+ * Set all locked nodes in the path to blocking locks.  This should be done
+ * before scheduling
+ */
+void btrfs_set_path_blocking(struct btrfs_path *p)
+{
+       int i;
+
+       for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+               if (!p->nodes[i] || !p->locks[i])
+                       continue;
+               /*
+                * If we currently have a spinning reader or writer lock this
+                * will bump the count of blocking holders and drop the
+                * spinlock.
+                */
+               if (p->locks[i] == BTRFS_READ_LOCK) {
+                       btrfs_set_lock_blocking_read(p->nodes[i]);
+                       p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
+               } else if (p->locks[i] == BTRFS_WRITE_LOCK) {
+                       btrfs_set_lock_blocking_write(p->nodes[i]);
+                       p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
+               }
+       }
+}
fs/btrfs/locking.h
index ab4020de25e7017573990f011d3ad72ab096db81..98c92222eaf06597ea6eaf8cd79f2b30952699b9 100644 (file)
@@ -33,6 +33,8 @@ static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
 static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
 #endif
 
+void btrfs_set_path_blocking(struct btrfs_path *p);
+
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
        if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)