author	Jens Axboe <axboe@kernel.dk>	2022-08-05 16:44:34 -0600
committer	Jens Axboe <axboe@kernel.dk>	2022-08-07 09:00:56 -0600
commit	5dcc5d3ca2212920d79a28e15b34329b7298cd97 (patch)
tree	bf0ece13a2750e8d8010a2d483091c288ef4ea1a
parent	d09d4be9e0ff0cca335e461a6d28fd61131bb455 (diff)
block: use on-stack page vec for <= UIO_FASTIOV
Avoid a kmalloc+kfree for each page array, if we only have a few pages
that are mapped. An alloc+free for each IO is quite expensive, and it's
pretty pointless if we're only dealing with 1 or a few vecs.

Use UIO_FASTIOV like we do in other spots to set a sane limit for how big
of an IO we want to avoid allocations for.

Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	block/blk-map.c	14
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 5da03f2614eb..35afd47f79f6 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -268,12 +268,19 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 	}
 
 	while (iov_iter_count(iter)) {
-		struct page **pages;
+		struct page **pages, *stack_pages[UIO_FASTIOV];
 		ssize_t bytes;
 		size_t offs, added = 0;
 		int npages;
 
-		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
+		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
+			pages = stack_pages;
+			bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
+						   nr_vecs, &offs);
+		} else {
+			bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX,
+							 &offs);
+		}
 		if (unlikely(bytes <= 0)) {
 			ret = bytes ? bytes : -EFAULT;
 			goto out_unmap;
@@ -310,7 +317,8 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		 */
 		while (j < npages)
 			put_page(pages[j++]);
-		kvfree(pages);
+		if (pages != stack_pages)
+			kvfree(pages);
 		/* couldn't stuff something into bio? */
 		if (bytes)
 			break;
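
For illustration only, the stack-first / heap-fallback shape used by this patch can be sketched as plain userspace C. This is not the kernel code: FASTVEC stands in for UIO_FASTIOV (8 in the kernel), and process_pages() is a made-up consumer used purely to have something touch the array. The point is the fast path that avoids any allocation for small requests, plus a single cleanup path where a pointer comparison decides whether anything needs to be freed.

/*
 * Minimal userspace sketch of the on-stack fast path pattern.
 * FASTVEC and process_pages() are illustrative stand-ins, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>

#define FASTVEC 8	/* stand-in for UIO_FASTIOV */

static void process_pages(void **pages, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("page slot %zu -> %p\n", i, pages[i]);
}

static int map_pages(size_t nr_vecs)
{
	void *stack_pages[FASTVEC];
	void **pages;

	if (nr_vecs <= FASTVEC) {
		/* small request: served entirely from the stack, no allocation */
		pages = stack_pages;
	} else {
		/* large request: pay for the allocation once */
		pages = malloc(nr_vecs * sizeof(*pages));
		if (!pages)
			return -1;
	}

	/* fill the slots with fake "pinned" pages for the demo */
	for (size_t i = 0; i < nr_vecs; i++)
		pages[i] = &pages[i];

	process_pages(pages, nr_vecs);

	/* only free what was actually heap-allocated */
	if (pages != stack_pages)
		free(pages);
	return 0;
}

int main(void)
{
	map_pages(4);	/* fits in stack_pages, no malloc/free */
	map_pages(32);	/* falls back to malloc()/free() */
	return 0;
}

The single cleanup path works for both cases because pages either aliases stack_pages (nothing to free) or points at a heap allocation, which mirrors the "if (pages != stack_pages) kvfree(pages);" hunk in the patch above.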