bcachefs: Use GFP_KERNEL for promote allocations
author     Kent Overstreet <kent.overstreet@linux.dev>
           Wed, 20 Dec 2023 07:38:10 +0000 (02:38 -0500)
committer  Kent Overstreet <kent.overstreet@linux.dev>
           Mon, 1 Jan 2024 16:47:42 +0000 (11:47 -0500)
We already have btree locks dropped here - no need for GFP_NOFS.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
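
Context for the flag change: an allocation made while btree node locks are still held must not recurse into filesystem reclaim (reclaim may itself need those locks), which is what GFP_NOFS guarantees. Once the locks have been dropped, plain GFP_KERNEL is safe and lets the allocator reclaim more aggressively. Below is a minimal sketch of that rule only, not the actual call site (__promote_alloc() is entered with the locks already dropped; bch2_trans_unlock() here just marks the point after which GFP_KERNEL becomes legal):

#include <linux/slab.h>

/* Sketch only: shows why GFP_KERNEL is safe once btree locks are gone. */
static void *alloc_after_unlock(struct btree_trans *trans, size_t bytes)
{
	/* Dropping the btree locks means reclaim can no longer deadlock on them. */
	bch2_trans_unlock(trans);

	/* Full GFP_KERNEL reclaim (including FS reclaim) is now allowed. */
	return kzalloc(bytes, GFP_KERNEL);
}

If the allocation had to happen while the locks were still held, it would instead need GFP_NOFS (or a memalloc_nofs_save()/memalloc_nofs_restore() section) to keep reclaim out of the filesystem.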
fs/bcachefs/io_read.c

index 4c9eaf7cea8df07835e9b217dff097b73581fa60..88aa004eade8692d2555b6d74025f37858959d5d 100644
@@ -174,7 +174,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
        if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
                return NULL;
 
-       op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOFS);
+       op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_KERNEL);
        if (!op)
                goto err;
 
@@ -187,7 +187,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
         */
        *rbio = kzalloc(sizeof(struct bch_read_bio) +
                        sizeof(struct bio_vec) * pages,
-                       GFP_NOFS);
+                       GFP_KERNEL);
        if (!*rbio)
                goto err;
 
@@ -195,7 +195,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
        bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
 
        if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
-                                GFP_NOFS))
+                                GFP_KERNEL))
                goto err;
 
        (*rbio)->bounce         = true;