xfs: convert kmem_alloc() to kmalloc()
authorDave Chinner <dchinner@redhat.com>
Mon, 15 Jan 2024 22:59:40 +0000 (09:59 +1100)
committerChandan Babu R <chandanbabu@kernel.org>
Tue, 13 Feb 2024 12:37:34 +0000 (18:07 +0530)
kmem_alloc() is just a thin wrapper around kmalloc() these days.
Convert everything to use kmalloc() so we can get rid of the
wrapper.
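
For reference, the GFP flag mapping that the converted call sites now
encode directly replaces what kmem_flags_convert() used to compute at
runtime (see the kmem.h removal below), with the wrapper's internal
retry loop becoming an explicit __GFP_NOFAIL. A minimal sketch of that
mapping; the helper name here is made up purely for illustration:

#include <linux/gfp.h>
#include <linux/types.h>

/* Illustration only: how old KM_* flag combinations map to GFP flags. */
static inline gfp_t
kmem_to_gfp_sketch(bool nofs, bool mayfail, bool zero)
{
	gfp_t	gfp = nofs ? GFP_NOFS : GFP_KERNEL;	/* KM_NOFS */

	if (mayfail)
		gfp |= __GFP_RETRY_MAYFAIL;		/* KM_MAYFAIL */
	else
		gfp |= __GFP_NOFAIL;	/* kmem_alloc() retried forever itself */
	if (zero)
		gfp |= __GFP_ZERO;	/* KM_ZERO; call sites use kzalloc() */
	return gfp;
}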

Note: the transaction region allocation in xlog_recover_add_to_trans()
can be a high order allocation. Converting it to use
kmalloc(__GFP_NOFAIL) triggers warnings in the page allocation code,
because the mm subsystem does not want __GFP_NOFAIL used with high
order allocations the way we've been doing through the kmem_alloc()
wrapper for a couple of decades. Hence this specific case is
converted to xlog_kvmalloc() rather than kmalloc() to avoid the
issue.
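
For context, xlog_kvmalloc() (defined in fs/xfs/xfs_log_priv.h) is an
open-coded kvmalloc()-style helper: it lets the kmalloc() attempt fail
fast and falls back to vmalloc(), so a high order allocation is never
forced to succeed. A rough sketch of the idea, not the verbatim
upstream helper (its exact flag handling may differ):

#include <linux/slab.h>
#include <linux/vmalloc.h>

static inline void *
xlog_kvmalloc_sketch(size_t buf_size)
{
	/* Drop direct reclaim so the kmalloc() attempt fails fast. */
	gfp_t	flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;
	void	*p;

	do {
		p = kmalloc(buf_size, flags);
		if (!p)
			p = vmalloc(buf_size);	/* needs only order-0 pages */
	} while (!p);

	return p;
}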

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
21 files changed:
fs/xfs/Makefile
fs/xfs/kmem.c [deleted file]
fs/xfs/kmem.h
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_btree_staging.c
fs/xfs/libxfs/xfs_da_btree.c
fs/xfs/libxfs/xfs_dir2.c
fs/xfs/libxfs/xfs_dir2_block.c
fs/xfs/libxfs/xfs_dir2_sf.c
fs/xfs/libxfs/xfs_inode_fork.c
fs/xfs/xfs_attr_list.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf_item_recover.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_inode_item_recover.c
fs/xfs/xfs_iwalk.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_super.c
fs/xfs/xfs_trace.h

index fbe3cdc79036bee2aed44c2c1618007b89448fdb..35a23427055b8fec245c24bc8364995e79b632c0 100644 (file)
@@ -92,8 +92,7 @@ xfs-y                         += xfs_aops.o \
                                   xfs_symlink.o \
                                   xfs_sysfs.o \
                                   xfs_trans.o \
-                                  xfs_xattr.o \
-                                  kmem.o
+                                  xfs_xattr.o
 
 # low-level transaction/log code
 xfs-y                          += xfs_log.o \
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
deleted file mode 100644 (file)
index c557a03..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- */
-#include "xfs.h"
-#include "xfs_message.h"
-#include "xfs_trace.h"
-
-void *
-kmem_alloc(size_t size, xfs_km_flags_t flags)
-{
-       int     retries = 0;
-       gfp_t   lflags = kmem_flags_convert(flags);
-       void    *ptr;
-
-       trace_kmem_alloc(size, flags, _RET_IP_);
-
-       do {
-               ptr = kmalloc(size, lflags);
-               if (ptr || (flags & KM_MAYFAIL))
-                       return ptr;
-               if (!(++retries % 100))
-                       xfs_err(NULL,
-       "%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
-                               current->comm, current->pid,
-                               (unsigned int)size, __func__, lflags);
-               memalloc_retry_wait(lflags);
-       } while (1);
-}
index bce31182c9e8d8da7564fa6dd4f44424b9e05aea..1343f1a6f99b1127ca37a598f2ddf880bf53de98 100644 (file)
  * General memory allocation interfaces
  */
 
-typedef unsigned __bitwise xfs_km_flags_t;
-#define KM_NOFS                ((__force xfs_km_flags_t)0x0004u)
-#define KM_MAYFAIL     ((__force xfs_km_flags_t)0x0008u)
-#define KM_ZERO                ((__force xfs_km_flags_t)0x0010u)
-#define KM_NOLOCKDEP   ((__force xfs_km_flags_t)0x0020u)
-
-/*
- * We use a special process flag to avoid recursive callbacks into
- * the filesystem during transactions.  We will also issue our own
- * warnings, so we explicitly skip any generic ones (silly of us).
- */
-static inline gfp_t
-kmem_flags_convert(xfs_km_flags_t flags)
-{
-       gfp_t   lflags;
-
-       BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));
-
-       lflags = GFP_KERNEL | __GFP_NOWARN;
-       if (flags & KM_NOFS)
-               lflags &= ~__GFP_FS;
-
-       /*
-        * Default page/slab allocator behavior is to retry for ever
-        * for small allocations. We can override this behavior by using
-        * __GFP_RETRY_MAYFAIL which will tell the allocator to retry as long
-        * as it is feasible but rather fail than retry forever for all
-        * request sizes.
-        */
-       if (flags & KM_MAYFAIL)
-               lflags |= __GFP_RETRY_MAYFAIL;
-
-       if (flags & KM_ZERO)
-               lflags |= __GFP_ZERO;
-
-       if (flags & KM_NOLOCKDEP)
-               lflags |= __GFP_NOLOCKDEP;
-
-       return lflags;
-}
-
-extern void *kmem_alloc(size_t, xfs_km_flags_t);
 static inline void  kmem_free(const void *ptr)
 {
        kvfree(ptr);
index ab4223bf51eee2c9de4e15dda6bc9ba40bdbb0c4..033382cf514dadff5b4a5c70f1a0526cc3676f0b 100644 (file)
@@ -879,8 +879,7 @@ xfs_attr_shortform_to_leaf(
 
        trace_xfs_attr_sf_to_leaf(args);
 
-       tmpbuffer = kmem_alloc(size, 0);
-       ASSERT(tmpbuffer != NULL);
+       tmpbuffer = kmalloc(size, GFP_KERNEL | __GFP_NOFAIL);
        memcpy(tmpbuffer, ifp->if_data, size);
        sf = (struct xfs_attr_sf_hdr *)tmpbuffer;
 
@@ -1059,7 +1058,7 @@ xfs_attr3_leaf_to_shortform(
 
        trace_xfs_attr_leaf_to_sf(args);
 
-       tmpbuffer = kmem_alloc(args->geo->blksize, 0);
+       tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
        if (!tmpbuffer)
                return -ENOMEM;
 
@@ -1533,7 +1532,7 @@ xfs_attr3_leaf_compact(
 
        trace_xfs_attr_leaf_compact(args);
 
-       tmpbuffer = kmem_alloc(args->geo->blksize, 0);
+       tmpbuffer = kmalloc(args->geo->blksize, GFP_KERNEL | __GFP_NOFAIL);
        memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
        memset(bp->b_addr, 0, args->geo->blksize);
        leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
index eff29425fd762bb8c61b6567b8377843b079e3dc..065e4a00a2f4fa298935030c0b7fb449bd5239c0 100644 (file)
@@ -139,7 +139,7 @@ xfs_btree_stage_afakeroot(
        ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE));
        ASSERT(cur->bc_tp == NULL);
 
-       nops = kmem_alloc(sizeof(struct xfs_btree_ops), KM_NOFS);
+       nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_NOFS | __GFP_NOFAIL);
        memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
        nops->alloc_block = xfs_btree_fakeroot_alloc_block;
        nops->free_block = xfs_btree_fakeroot_free_block;
@@ -220,7 +220,7 @@ xfs_btree_stage_ifakeroot(
        ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
        ASSERT(cur->bc_tp == NULL);
 
-       nops = kmem_alloc(sizeof(struct xfs_btree_ops), KM_NOFS);
+       nops = kmalloc(sizeof(struct xfs_btree_ops), GFP_NOFS | __GFP_NOFAIL);
        memcpy(nops, cur->bc_ops, sizeof(struct xfs_btree_ops));
        nops->alloc_block = xfs_btree_fakeroot_alloc_block;
        nops->free_block = xfs_btree_fakeroot_free_block;
index 73aae65439060dd75bc676c30a353f5794843d8a..331b9251b1857496ecbb509034796b51a6a685cc 100644 (file)
@@ -2182,7 +2182,8 @@ xfs_da_grow_inode_int(
                 * If we didn't get it and the block might work if fragmented,
                 * try without the CONTIG flag.  Loop until we get it all.
                 */
-               mapp = kmem_alloc(sizeof(*mapp) * count, 0);
+               mapp = kmalloc(sizeof(*mapp) * count,
+                               GFP_KERNEL | __GFP_NOFAIL);
                for (b = *bno, mapi = 0; b < *bno + count; ) {
                        c = (int)(*bno + count - b);
                        nmap = min(XFS_BMAP_MAX_NMAP, c);
index 54915a302e96e4afa992745d5448992c7b7d8136..370d673004553e7ac2be6754af85bb13b89ea2e2 100644 (file)
@@ -333,7 +333,7 @@ xfs_dir_cilookup_result(
                                        !(args->op_flags & XFS_DA_OP_CILOOKUP))
                return -EEXIST;
 
-       args->value = kmem_alloc(len, KM_NOFS | KM_MAYFAIL);
+       args->value = kmalloc(len, GFP_NOFS | __GFP_RETRY_MAYFAIL);
        if (!args->value)
                return -ENOMEM;
 
index 3c256d4cc40b487c7c8909835b6dd3e0cf957fed..506c65caaec5d0a1550fdae78c340e2e5e924e4c 100644 (file)
@@ -1108,7 +1108,7 @@ xfs_dir2_sf_to_block(
         * Copy the directory into a temporary buffer.
         * Then pitch the incore inode data so we can make extents.
         */
-       sfp = kmem_alloc(ifp->if_bytes, 0);
+       sfp = kmalloc(ifp->if_bytes, GFP_KERNEL | __GFP_NOFAIL);
        memcpy(sfp, oldsfp, ifp->if_bytes);
 
        xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
index e1f83fc7b6ad1108f52680e0fdf94f322feeed1a..7b1f41cff9e093ae28e06ca09d0ddd021f0f05e0 100644 (file)
@@ -276,7 +276,7 @@ xfs_dir2_block_to_sf(
         * format the data into.  Once we have formatted the data, we can free
         * the block and copy the formatted data into the inode literal area.
         */
-       sfp = kmem_alloc(mp->m_sb.sb_inodesize, 0);
+       sfp = kmalloc(mp->m_sb.sb_inodesize, GFP_KERNEL | __GFP_NOFAIL);
        memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count));
 
        /*
@@ -524,7 +524,7 @@ xfs_dir2_sf_addname_hard(
         * Copy the old directory to the stack buffer.
         */
        old_isize = (int)dp->i_disk_size;
-       buf = kmem_alloc(old_isize, 0);
+       buf = kmalloc(old_isize, GFP_KERNEL | __GFP_NOFAIL);
        oldsfp = (xfs_dir2_sf_hdr_t *)buf;
        memcpy(oldsfp, dp->i_df.if_data, old_isize);
        /*
@@ -1151,7 +1151,7 @@ xfs_dir2_sf_toino4(
         * Don't want xfs_idata_realloc copying the data here.
         */
        oldsize = dp->i_df.if_bytes;
-       buf = kmem_alloc(oldsize, 0);
+       buf = kmalloc(oldsize, GFP_KERNEL | __GFP_NOFAIL);
        ASSERT(oldsfp->i8count == 1);
        memcpy(buf, oldsfp, oldsize);
        /*
@@ -1223,7 +1223,7 @@ xfs_dir2_sf_toino8(
         * Don't want xfs_idata_realloc copying the data here.
         */
        oldsize = dp->i_df.if_bytes;
-       buf = kmem_alloc(oldsize, 0);
+       buf = kmalloc(oldsize, GFP_KERNEL | __GFP_NOFAIL);
        ASSERT(oldsfp->i8count == 0);
        memcpy(buf, oldsfp, oldsize);
        /*
index f4569e18a8d0ea538b62491c64cd979e011fd446..f3cf7f933e1520db1f6069b360aeb4206bcad9e8 100644 (file)
@@ -50,7 +50,7 @@ xfs_init_local_fork(
                mem_size++;
 
        if (size) {
-               char *new_data = kmem_alloc(mem_size, KM_NOFS);
+               char *new_data = kmalloc(mem_size, GFP_NOFS | __GFP_NOFAIL);
 
                memcpy(new_data, data, size);
                if (zero_terminate)
@@ -77,7 +77,7 @@ xfs_iformat_local(
        /*
         * If the size is unreasonable, then something
         * is wrong and we just bail out rather than crash in
-        * kmem_alloc() or memcpy() below.
+        * kmalloc() or memcpy() below.
         */
        if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
                xfs_warn(ip->i_mount,
@@ -116,7 +116,7 @@ xfs_iformat_extents(
 
        /*
         * If the number of extents is unreasonable, then something is wrong and
-        * we just bail out rather than crash in kmem_alloc() or memcpy() below.
+        * we just bail out rather than crash in kmalloc() or memcpy() below.
         */
        if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, mp, whichfork))) {
                xfs_warn(ip->i_mount, "corrupt inode %llu ((a)extents = %llu).",
@@ -205,7 +205,7 @@ xfs_iformat_btree(
        }
 
        ifp->if_broot_bytes = size;
-       ifp->if_broot = kmem_alloc(size, KM_NOFS);
+       ifp->if_broot = kmalloc(size, GFP_NOFS | __GFP_NOFAIL);
        ASSERT(ifp->if_broot != NULL);
        /*
         * Copy and convert from the on-disk structure
@@ -399,7 +399,8 @@ xfs_iroot_realloc(
                 */
                if (ifp->if_broot_bytes == 0) {
                        new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff);
-                       ifp->if_broot = kmem_alloc(new_size, KM_NOFS);
+                       ifp->if_broot = kmalloc(new_size,
+                                               GFP_NOFS | __GFP_NOFAIL);
                        ifp->if_broot_bytes = (int)new_size;
                        return;
                }
@@ -440,7 +441,7 @@ xfs_iroot_realloc(
        else
                new_size = 0;
        if (new_size > 0) {
-               new_broot = kmem_alloc(new_size, KM_NOFS);
+               new_broot = kmalloc(new_size, GFP_NOFS | __GFP_NOFAIL);
                /*
                 * First copy over the btree block header.
                 */
@@ -488,7 +489,7 @@ xfs_iroot_realloc(
  *
  * If the amount of space needed has decreased below the size of the
  * inline buffer, then switch to using the inline buffer.  Otherwise,
- * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
+ * use krealloc() or kmalloc() to adjust the size of the buffer
  * to what is needed.
  *
  * ip -- the inode whose if_data area is changing
index e368ad671e261e42159351316fc77cd5596742e4..5f7a44d21cc9dee316c3341286085d759f816ab2 100644 (file)
@@ -109,7 +109,7 @@ xfs_attr_shortform_list(
         * It didn't all fit, so we have to sort everything on hashval.
         */
        sbsize = sf->count * sizeof(*sbuf);
-       sbp = sbuf = kmem_alloc(sbsize, KM_NOFS);
+       sbp = sbuf = kmalloc(sbsize, GFP_NOFS | __GFP_NOFAIL);
 
        /*
         * Scan the attribute list for the rest of the entries, storing
index 5672665150eab40c2a14f6480c56550b2eeb6872..6f53eb2d3de019927e2178c41c744ad8b6be9e5b 100644 (file)
@@ -325,14 +325,14 @@ xfs_buf_alloc_kmem(
        struct xfs_buf  *bp,
        xfs_buf_flags_t flags)
 {
-       xfs_km_flags_t  kmflag_mask = KM_NOFS;
+       gfp_t           gfp_mask = GFP_NOFS | __GFP_NOFAIL;
        size_t          size = BBTOB(bp->b_length);
 
        /* Assure zeroed buffer for non-read cases. */
        if (!(flags & XBF_READ))
-               kmflag_mask |= KM_ZERO;
+               gfp_mask |= __GFP_ZERO;
 
-       bp->b_addr = kmem_alloc(size, kmflag_mask);
+       bp->b_addr = kmalloc(size, gfp_mask);
        if (!bp->b_addr)
                return -ENOMEM;
 
index 43167f543afc33f5471d0d11c27917d19ab9a13c..34776f4c05acc796feade8dec560696cfb08dc7c 100644 (file)
@@ -85,7 +85,7 @@ xlog_add_buffer_cancelled(
                return false;
        }
 
-       bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
+       bcp = kmalloc(sizeof(struct xfs_buf_cancel), GFP_KERNEL | __GFP_NOFAIL);
        bcp->bc_blkno = blkno;
        bcp->bc_len = len;
        bcp->bc_refcount = 1;
index 2fc98d3137086db0a0203b18e588a659b2da5da7..e2a3c8d3fe4f9860c83760a554063785c8757fda 100644 (file)
@@ -313,7 +313,7 @@ xfs_filestream_create_association(
         * we return a referenced AG, the allocation can still go ahead just
         * fine.
         */
-       item = kmem_alloc(sizeof(*item), KM_MAYFAIL);
+       item = kmalloc(sizeof(*item), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!item)
                goto out_put_fstrms;
 
index 144198a6b2702c9f825bd9ad22fe10cd1085ea65..5d7b937179a0b9fba57e0326c8ef95275f684fa0 100644 (file)
@@ -291,7 +291,8 @@ xlog_recover_inode_commit_pass2(
        if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
                in_f = item->ri_buf[0].i_addr;
        } else {
-               in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
+               in_f = kmalloc(sizeof(struct xfs_inode_log_format),
+                               GFP_KERNEL | __GFP_NOFAIL);
                need_free = 1;
                error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
                if (error)
index 8dbb7c054b28975e2884bba073f451bb5fad3926..5dd622aa54c5dcdd77c18ecafefb3c30829b9802 100644 (file)
@@ -160,7 +160,7 @@ xfs_iwalk_alloc(
 
        /* Allocate a prefetch buffer for inobt records. */
        size = iwag->sz_recs * sizeof(struct xfs_inobt_rec_incore);
-       iwag->recs = kmem_alloc(size, KM_MAYFAIL);
+       iwag->recs = kmalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (iwag->recs == NULL)
                return -ENOMEM;
 
index 4a27ecdbb5463c1e5b53d373c15f5ec4067fe316..e3bd503edcab7aea1620772cb6b7e409f7977cd7 100644 (file)
@@ -2161,7 +2161,7 @@ xlog_recover_add_to_trans(
                return 0;
        }
 
-       ptr = kmem_alloc(len, 0);
+       ptr = xlog_kvmalloc(len);
        memcpy(ptr, dp, len);
        in_f = (struct xfs_inode_log_format *)ptr;
 
index 9c2528418b180a52707a9d5209a03303d8a211df..6b6b964544490429dea601c39552d7aa3ff665a6 100644 (file)
@@ -997,7 +997,8 @@ xfs_qm_reset_dqcounts_buf(
        if (qip->i_nblocks == 0)
                return 0;
 
-       map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0);
+       map = kmalloc(XFS_DQITER_MAP_SIZE * sizeof(*map),
+                       GFP_KERNEL | __GFP_NOFAIL);
 
        lblkno = 0;
        maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
index 8649d981a097d3f9724c705f72c2832da4829b9c..8a8d6197203edba29d624ab4e9ec21404af18cc7 100644 (file)
@@ -903,7 +903,7 @@ xfs_growfs_rt(
        /*
         * Allocate a new (fake) mount/sb.
         */
-       nmp = kmem_alloc(sizeof(*nmp), 0);
+       nmp = kmalloc(sizeof(*nmp), GFP_KERNEL | __GFP_NOFAIL);
        /*
         * Loop over the bitmap blocks.
         * We will do everything one bitmap block at a time.
index 5a2512d20bd07473a872592911ede7246b8c11b7..d167ba00e7cfc89632c443db26b19380a16f405a 100644 (file)
@@ -1987,7 +1987,7 @@ static int xfs_init_fs_context(
 {
        struct xfs_mount        *mp;
 
-       mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
+       mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
        if (!mp)
                return -ENOMEM;
 
index 0984a1c884c742ab1fbd7b1f9f57c800de14999e..c7e57efe0356662187f1cc3d2b53436a1f019264 100644 (file)
@@ -4040,31 +4040,6 @@ TRACE_EVENT(xfs_pwork_init,
                  __entry->nr_threads, __entry->pid)
 )
 
-DECLARE_EVENT_CLASS(xfs_kmem_class,
-       TP_PROTO(ssize_t size, int flags, unsigned long caller_ip),
-       TP_ARGS(size, flags, caller_ip),
-       TP_STRUCT__entry(
-               __field(ssize_t, size)
-               __field(int, flags)
-               __field(unsigned long, caller_ip)
-       ),
-       TP_fast_assign(
-               __entry->size = size;
-               __entry->flags = flags;
-               __entry->caller_ip = caller_ip;
-       ),
-       TP_printk("size %zd flags 0x%x caller %pS",
-                 __entry->size,
-                 __entry->flags,
-                 (char *)__entry->caller_ip)
-)
-
-#define DEFINE_KMEM_EVENT(name) \
-DEFINE_EVENT(xfs_kmem_class, name, \
-       TP_PROTO(ssize_t size, int flags, unsigned long caller_ip), \
-       TP_ARGS(size, flags, caller_ip))
-DEFINE_KMEM_EVENT(kmem_alloc);
-
 TRACE_EVENT(xfs_check_new_dalign,
        TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino),
        TP_ARGS(mp, new_dalign, calc_rootino),