io_uring/rsrc: don't rely on user vaddr alignment
author Pavel Begunkov <asml.silence@gmail.com>
Tue, 24 Jun 2025 13:40:34 +0000 (14:40 +0100)
committer Jens Axboe <axboe@kernel.dk>
Wed, 25 Jun 2025 02:50:59 +0000 (20:50 -0600)
There is no guaranteed alignment for user pointers. The calculation of
the offset of the first page into a folio after coalescing, however,
uses some weird bit mask logic that assumes such alignment. Get rid of
it and compute the offset from the index of the first page within its
folio instead.
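
To illustrate the difference, here is a minimal userspace sketch (not
the kernel code itself); PAGE_SHIFT, the 2MB folio, and the concrete
values below are assumptions made up for the example:

/*
 * Sketch of the old vs. new offset calculation. The names
 * folio_shift and first_folio_page_idx mirror the patch; the
 * numbers are hypothetical.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned int folio_shift = 21;		/* assumed 2MB folio */
	unsigned long first_folio_page_idx = 5;	/* first pinned page is page 5 of its folio */
	/* page-aligned user vaddr that is not folio-aligned */
	unsigned long iov_base = 0x7f0000003000UL;

	/* old: assumes the vaddr's low folio bits match the offset into the folio */
	unsigned long off_old = iov_base & ((1UL << folio_shift) - 1);

	/* new: in-page offset plus the first page's position within the folio */
	unsigned long off_new = (iov_base & ~PAGE_MASK) +
				(first_folio_page_idx << PAGE_SHIFT);

	printf("old off = 0x%lx, new off = 0x%lx\n", off_old, off_new);
	return 0;
}

With these values the old mask yields 0x3000, while the buffer actually
starts 0x8000 bytes into the coalesced folio: only page-granularity
alignment of the user mapping is guaranteed, so the vaddr's low bits
say nothing about where the first page sits inside the folio.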

Cc: stable@vger.kernel.org
Reported-by: David Hildenbrand <david@redhat.com>
Fixes: a8edbb424b139 ("io_uring/rsrc: enable multi-hugepage buffer coalescing")
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/io-uring/e387b4c78b33f231105a601d84eefd8301f57954.1750771718.git.asml.silence@gmail.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>

diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 0c09e38784c945e4ff848160ce1508096a928753..afc67530f9127b81dc3d68ab0c243da03b92c4c1 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -734,6 +734,7 @@ bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
 
        data->nr_pages_mid = folio_nr_pages(folio);
        data->folio_shift = folio_shift(folio);
+       data->first_folio_page_idx = folio_page_idx(folio, page_array[0]);
 
        /*
         * Check if pages are contiguous inside a folio, and all folios have
@@ -827,7 +828,11 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
        if (coalesced)
                imu->folio_shift = data.folio_shift;
        refcount_set(&imu->refs, 1);
-       off = (unsigned long) iov->iov_base & ((1UL << imu->folio_shift) - 1);
+
+       off = (unsigned long)iov->iov_base & ~PAGE_MASK;
+       if (coalesced)
+               off += data.first_folio_page_idx << PAGE_SHIFT;
+
        node->buf = imu;
        ret = 0;
 
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 0d2138f16322b21662034a029f905bd1b3159ee4..25e7e998dcfd0a463bc98e81a77148637801cc23 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -49,6 +49,7 @@ struct io_imu_folio_data {
        unsigned int    nr_pages_mid;
        unsigned int    folio_shift;
        unsigned int    nr_folios;
+       unsigned long   first_folio_page_idx;
 };
 
 bool io_rsrc_cache_init(struct io_ring_ctx *ctx);