xfs: introduce in-core global counter of allocbt blocks
author	Brian Foster <bfoster@redhat.com>
Wed, 28 Apr 2021 22:05:50 +0000 (15:05 -0700)
committer	Darrick J. Wong <djwong@kernel.org>
Thu, 29 Apr 2021 14:45:44 +0000 (07:45 -0700)
Introduce an in-core counter to track the sum of all allocbt blocks
used by the filesystem. This value is currently tracked per-ag via
the ->agf_btreeblks field in the AGF, which also happens to include
rmapbt blocks. A global, in-core count of allocbt blocks is required
to identify the subset of global ->m_fdblocks that consists of
unavailable blocks currently used for allocation btrees. To support
this calculation at block reservation time, construct a similar
global counter for allocbt blocks, populate it on first read of each
AGF and update it as allocbt blocks are used and released.
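
The counter is only populated and maintained by this patch; the block
reservation code that consumes it lands separately. As a rough,
hypothetical sketch of how a consumer might treat in-use allocbt blocks
as unavailable (the helper name and placement are assumptions, not part
of this patch):

	static uint64_t
	xfs_fdblocks_available(		/* hypothetical helper */
		struct xfs_mount	*mp)
	{
		int64_t	free = percpu_counter_sum(&mp->m_fdblocks);
		int64_t	used = atomic64_read(&mp->m_allocbt_blks);

		/* blocks tied up in allocation btrees cannot be reserved */
		return free > used ? free - used : 0;
	}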

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
Reviewed-by: Allison Henderson <allison.henderson@oracle.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_alloc_btree.c
fs/xfs/xfs_mount.h

index f52b9e4a03f9c8f2ff8b40b46cee980042ba1758..82b7cbb1f24f34c4b1ebe55b672c8a17f3030f0b 100644 (file)
@@ -3033,6 +3033,7 @@ xfs_alloc_read_agf(
        struct xfs_agf          *agf;           /* ag freelist header */
        struct xfs_perag        *pag;           /* per allocation group data */
        int                     error;
+       int                     allocbt_blks;
 
        trace_xfs_alloc_read_agf(mp, agno);
 
@@ -3063,6 +3064,19 @@ xfs_alloc_read_agf(
                pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
                pag->pagf_init = 1;
                pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
+
+               /*
+                * Update the in-core allocbt counter. Filter out the rmapbt
+                * subset of the btreeblks counter because the rmapbt is managed
+                * by perag reservation. Subtract one for the rmapbt root block
+                * because the rmap counter includes it while the btreeblks
+                * counter only tracks non-root blocks.
+                */
+               allocbt_blks = pag->pagf_btreeblks;
+               if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+                       allocbt_blks -= be32_to_cpu(agf->agf_rmap_blocks) - 1;
+               if (allocbt_blks > 0)
+                       atomic64_add(allocbt_blks, &mp->m_allocbt_blks);
        }
 #ifdef DEBUG
        else if (!XFS_FORCED_SHUTDOWN(mp)) {
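
A worked example with hypothetical per-AG numbers (not taken from the
patch) may make the root-block adjustment concrete:

	/*
	 * Hypothetical AG: pagf_btreeblks counts every non-root block of
	 * the bnobt, cntbt and rmapbt, while agf_rmap_blocks counts rmapbt
	 * blocks including the root.
	 */
	unsigned int	pagf_btreeblks = 10;	/* 4 bnobt + 3 cntbt + 3 rmapbt */
	unsigned int	rmap_blocks = 4;	/* 3 non-root rmapbt + 1 root */
	int		allocbt_blks;

	/* Remove the rmapbt share; its root is not in pagf_btreeblks. */
	allocbt_blks = pagf_btreeblks - (rmap_blocks - 1);	/* 10 - 3 = 7 */
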
index dbe302d1cb8de526dc09eb2356683b0a470d0242..a43e4c50e69b7ba9bd828e742125c2b75f4334d3 100644 (file)
@@ -71,6 +71,7 @@ xfs_allocbt_alloc_block(
                return 0;
        }
 
+       atomic64_inc(&cur->bc_mp->m_allocbt_blks);
        xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agno, bno, 1, false);
 
        new->s = cpu_to_be32(bno);
@@ -94,6 +95,7 @@ xfs_allocbt_free_block(
        if (error)
                return error;
 
+       atomic64_dec(&cur->bc_mp->m_allocbt_blks);
        xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
                              XFS_EXTENT_BUSY_SKIP_DISCARD);
        return 0;
index 81829d19596e02ae7f3977f9dd42a3039efebc64..bb67274ee23f15947f45ffa14a799b11041ce035 100644 (file)
@@ -170,6 +170,12 @@ typedef struct xfs_mount {
         * extents or anything related to the rt device.
         */
        struct percpu_counter   m_delalloc_blks;
+       /*
+        * Global count of allocation btree blocks in use across all AGs. Only
+        * used when perag reservation is enabled. Helps prevent block
+        * reservation from attempting to reserve allocation btree blocks.
+        */
+       atomic64_t              m_allocbt_blks;
 
        struct radix_tree_root  m_perag_tree;   /* per-ag accounting info */
        spinlock_t              m_perag_lock;   /* lock for m_perag_tree */