// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-cgroup.h"

#define ALLOC_CACHE_THRESHOLD	16
#define ALLOC_CACHE_SLACK	64
#define ALLOC_CACHE_MAX		256

struct bio_alloc_cache {
	struct bio		*free_list;
	struct bio		*free_list_irq;
	unsigned int		nr;
	unsigned int		nr_irq;
};

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN,
			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations.  If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p = bio;

	WARN_ON_ONCE(!bs);

	bio_uninit(bio);
	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
	mempool_free(p - bs->front_pad, &bs->bio_pool);
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf)
{
	bio->bi_next = NULL;
	bio->bi_bdev = bdev;
	bio->bi_opf = opf;
	bio->bi_flags = 0;
	bio->bi_ioprio = 0;
	bio->bi_status = 0;
	bio->bi_iter.bi_sector = 0;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_idx = 0;
	bio->bi_iter.bi_bvec_done = 0;
	bio->bi_end_io = NULL;
	bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
	bio->bi_blkg = NULL;
	bio->bi_issue.value = 0;
	if (bdev)
		bio_associate_blkg(bio);
#ifdef CONFIG_BLK_CGROUP_IOCOST
	bio->bi_iocost_cost = 0;
#endif
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	bio->bi_crypt_context = NULL;
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	bio->bi_integrity = NULL;
#endif
	bio->bi_vcnt = 0;

	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
	bio->bi_cookie = BLK_QC_T_NONE;

	bio->bi_max_vecs = max_vecs;
	bio->bi_io_vec = table;
	bio->bi_pool = NULL;
}
EXPORT_SYMBOL(bio_init);
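
/*
 * Illustrative sketch, not part of this file: a driver embedding a bio at
 * the end of its own structure and pairing bio_init() with bio_uninit().
 * "struct my_flush_req", "my_bdev" and "my_end_io" are hypothetical names.
 *
 *	struct my_flush_req {
 *		void *driver_data;
 *		struct bio bio;		(must be the last member)
 *	};
 *
 *	bio_init(&req->bio, my_bdev, NULL, 0, REQ_OP_FLUSH);
 *	req->bio.bi_end_io = my_end_io;
 *	submit_bio(&req->bio);
 *
 * my_end_io() is then responsible for calling bio_uninit(&req->bio) once
 * the IO has completed.
 */
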
/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 * @bdev:	block device to use the bio for
 * @opf:	operation and flags for bio
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
	bio->bi_bdev = bdev;
	if (bio->bi_bdev)
		bio_associate_blkg(bio);
	bio->bi_opf = opf;
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io	= bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
{
	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
{
	unsigned long flags;

	/* cache->free_list must be empty */
	if (WARN_ON_ONCE(cache->free_list))
		return;

	local_irq_save(flags);
	cache->free_list = cache->free_list_irq;
	cache->free_list_irq = NULL;
	cache->nr += cache->nr_irq;
	cache->nr_irq = 0;
	local_irq_restore(flags);
}

static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
		struct bio_set *bs)
{
	struct bio_alloc_cache *cache;
	struct bio *bio;

	cache = per_cpu_ptr(bs->cache, get_cpu());
	if (!cache->free_list) {
		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
			bio_alloc_irq_cache_splice(cache);
		if (!cache->free_list) {
			put_cpu();
			return NULL;
		}
	}
	bio = cache->free_list;
	cache->free_list = bio->bi_next;
	cache->nr--;
	put_cpu();

	bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
	bio->bi_pool = bs;
	return bio;
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @bdev:	block device to allocate the bio for (can be %NULL)
 * @nr_vecs:	number of bvecs to pre-allocate
 * @opf:	operation and flags for bio
 * @gfp_mask:	the GFP_* mask given to the slab allocator
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio. This is due to the mempool guarantees. To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have
 * deadlock avoidance code that resubmits any blocked bios from a rescuer
 * thread.
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_vecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
		return NULL;

	if (opf & REQ_ALLOC_CACHE) {
		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
						     gfp_mask, bs);
			if (bio)
				return bio;
			/*
			 * No cached bio available; the bio returned below is
			 * marked with REQ_ALLOC_CACHE so that it participates
			 * in the per-cpu alloc cache.
			 */
		} else {
			opf &= ~REQ_ALLOC_CACHE;
		}
	}

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;
	if (!mempool_is_saturated(&bs->bio_pool))
		opf &= ~REQ_ALLOC_CACHE;

	bio = p + bs->front_pad;
	if (nr_vecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bdev, bvl, nr_vecs, opf);
	} else if (nr_vecs) {
		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
	} else {
		bio_init(bio, bdev, NULL, 0, opf);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
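
/*
 * Illustrative sketch, not part of this file: a single allocation from
 * fs_bio_set.  Because GFP_NOIO includes __GFP_DIRECT_RECLAIM, the
 * allocation cannot fail per the mempool guarantees above - provided the
 * caller submits this bio before allocating another one from the same
 * pool.  "bdev", "page" and "sector" are assumed caller state.
 *
 *	bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO, &fs_bio_set);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */
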
/**
 * bio_kmalloc - kmalloc a bio
 * @nr_vecs: number of bio_vecs to allocate
 * @gfp_mask: the GFP_* mask given to the slab allocator
 *
 * Use kmalloc to allocate a bio (including bvecs).  The bio must be
 * initialized using bio_init() before use.  To free a bio returned from this
 * function use kfree() after calling bio_uninit().  A bio returned from this
 * function can be reused by calling bio_uninit() before calling bio_init()
 * again.
 *
 * Note that unlike bio_alloc() or bio_alloc_bioset(), allocations from this
 * function are not backed by a mempool and can fail.  Do not use this function
 * for allocations in the file system I/O path.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (nr_vecs > UIO_MAXIOV)
		return NULL;
	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
}
EXPORT_SYMBOL(bio_kmalloc);
void zero_fill_bio(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_truncate - truncate the bio to the smaller size @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size of @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
static void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, bv.bv_offset + offset,
				  bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch bvec table here and make it really immutable, since
	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .end_bio() callback.
	 *
	 * It is enough to truncate bio by updating .bi_size since we can make
	 * correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				   unsigned int nr)
{
	unsigned int i = 0;
	struct bio *bio;

	while ((bio = cache->free_list) != NULL) {
		cache->free_list = bio->bi_next;
		cache->nr--;
		bio_free(bio);
		if (++i == nr)
			break;
	}
	return i;
}

static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				  unsigned int nr)
{
	nr -= __bio_alloc_cache_prune(cache, nr);
	if (!READ_ONCE(cache->free_list)) {
		bio_alloc_irq_cache_splice(cache);
		__bio_alloc_cache_prune(cache, nr);
	}
}

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct bio_set *bs;

	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
	if (bs->cache) {
		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

		bio_alloc_cache_prune(cache, -1U);
	}
	return 0;
}

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
	int cpu;

	if (!bs->cache)
		return;

	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	for_each_possible_cpu(cpu) {
		struct bio_alloc_cache *cache;

		cache = per_cpu_ptr(bs->cache, cpu);
		bio_alloc_cache_prune(cache, -1U);
	}
	free_percpu(bs->cache);
	bs->cache = NULL;
}

static inline void bio_put_percpu_cache(struct bio *bio)
{
	struct bio_alloc_cache *cache;

	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) {
		put_cpu();
		bio_free(bio);
		return;
	}

	bio_uninit(bio);

	if ((bio->bi_opf & REQ_POLLED) && !WARN_ON_ONCE(in_interrupt())) {
		bio->bi_next = cache->free_list;
		cache->free_list = bio;
		cache->nr++;
	} else {
		unsigned long flags;

		local_irq_save(flags);
		bio->bi_next = cache->free_list_irq;
		cache->free_list_irq = bio;
		cache->nr_irq++;
		local_irq_restore(flags);
	}
	put_cpu();
}

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
		BUG_ON(!atomic_read(&bio->__bi_cnt));
		if (!atomic_dec_and_test(&bio->__bi_cnt))
			return;
	}
	if (bio->bi_opf & REQ_ALLOC_CACHE)
		bio_put_percpu_cache(bio);
	else
		bio_free(bio);
}
EXPORT_SYMBOL(bio_put);

static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_iter = bio_src->bi_iter;

	if (bio->bi_bdev) {
		if (bio->bi_bdev == bio_src->bi_bdev &&
		    bio_flagged(bio_src, BIO_REMAPPED))
			bio_set_flag(bio, BIO_REMAPPED);
		bio_clone_blkg_association(bio, bio_src);
	}

	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	return 0;
}

/**
 * bio_alloc_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 * @bs: bio_set to allocate from
 *
 * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
 * bio, but not the actual data it points to.
 *
 * The caller must ensure that the returned bio is not freed before @bio_src.
 */
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs)
{
	struct bio *bio;

	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
	if (!bio)
		return NULL;

	if (__bio_clone(bio, bio_src, gfp) < 0) {
		bio_put(bio);
		return NULL;
	}
	bio->bi_io_vec = bio_src->bi_io_vec;

	return bio;
}
EXPORT_SYMBOL(bio_alloc_clone);

/**
 * bio_init_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio: bio to clone into
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 *
 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
 * The caller owns the returned bio, but not the actual data it points to.
 *
 * The caller must ensure that @bio_src is not freed before @bio.
 */
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp)
{
	int ret;

	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
	ret = __bio_clone(bio, bio_src, gfp);
	if (ret)
		bio_uninit(bio);
	return ret;
}
EXPORT_SYMBOL(bio_init_clone);

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;
	if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	else if (IS_ENABLED(CONFIG_KMSAN))
		return false;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio.  This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages when @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
static bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit. This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned len,
		unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(&q->limits, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_zone_append_page - attempt to add page to zone-append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
 * for a zone-append request. This can fail for a number of reasons, such as the
 * bio being full, the target block device not being a zoned block device, or
 * other limitations of the target block device. The target block device must
 * allow bio's up to PAGE_SIZE, so it is always possible to add a single page
 * to an empty bio.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	bool same_page = false;

	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;

	if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
		return 0;

	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
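
/*
 * Illustrative sketch, not part of this file: filling a bio page by page
 * and stopping when it is full.  "pages" and "nr_pages" are assumed
 * caller state.
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;	(bio full or cloned: submit, then continue
 *				 with a freshly allocated bio)
 *	}
 */
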
/**
 * bio_add_folio - Attempt to add part of a folio to a bio.
 * @bio: BIO to add to.
 * @folio: Folio to add.
 * @len: How many bytes from the folio to add.
 * @off: First byte in this folio to add.
 *
 * Filesystems that use folios can call this function instead of calling
 * bio_add_page() for each page in the folio.  If @off is bigger than
 * PAGE_SIZE, this function can create a bio_vec that starts in a page
 * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
 *
 * Return: Whether the addition was successful.
 */
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
		   size_t off)
{
	if (len > UINT_MAX || off > UINT_MAX)
		return false;
	return bio_add_page(bio, &folio->page, len, off) > 0;
}

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);

void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	size_t size = iov_iter_count(iter);

	WARN_ON_ONCE(bio->bi_max_vecs);

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		size_t max_sectors = queue_max_zone_append_sectors(q);

		size = min(size, max_sectors << SECTOR_SHIFT);
	}

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = size;
	bio_set_flag(bio, BIO_NO_PAGE_REF);
	bio_set_flag(bio, BIO_CLONED);
}

static int bio_iov_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		__bio_add_page(bio, page, len, offset);
		return 0;
	}

	if (same_page)
		put_page(page);
	return 0;
}

static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	bool same_page = false;

	if (bio_add_hw_page(q, bio, page, len, offset,
			queue_max_zone_append_sectors(q), &same_page) != len)
		return -EINVAL;
	if (same_page)
		put_page(page);
	return 0;
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array.  The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	unsigned int gup_flags = 0;
	ssize_t size, left;
	unsigned len, i = 0;
	size_t offset, trim;
	int ret = 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
		gup_flags |= FOLL_PCI_P2PDMA;

	/*
	 * Each segment in the iov is required to be a block size multiple.
	 * However, we may not be able to get the entire segment if it spans
	 * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the
	 * result to ensure the bio's total size is correct. The remainder of
	 * the iov data will be picked up in the next bio iteration.
	 */
	size = iov_iter_get_pages(iter, pages,
				  UINT_MAX - bio->bi_iter.bi_size,
				  nr_pages, &offset, gup_flags);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);

	trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
	iov_iter_revert(iter, trim);

	size -= trim;
	if (unlikely(!size)) {
		ret = -EFAULT;
		goto out;
	}

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			ret = bio_iov_add_zone_append_page(bio, page, len,
					offset);
			if (ret)
				break;
		} else
			bio_iov_add_page(bio, page, len, offset);

		offset = 0;
	}

	iov_iter_revert(iter, left);
out:
	while (i < nr_pages)
		put_page(pages[i++]);

	return ret;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
 * to ensure the bvecs and pages stay referenced until the submitted I/O is
 * completed by a call to ->ki_complete() or returns with an error other than
 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
 * on IO completion. If it isn't, then pages should be released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	int ret = 0;

	if (iov_iter_is_bvec(iter)) {
		bio_iov_bvec_set(bio, iter);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
		return 0;
	}

	do {
		ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
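
/*
 * Illustrative sketch, not part of this file: a direct-IO style loop that
 * maps an iov_iter into bios (error handling elided; "bdev" is assumed
 * caller state, and bio_iov_vecs_to_alloc() is the helper from
 * <linux/bio.h>):
 *
 *	while (iov_iter_count(iter)) {
 *		bio = bio_alloc(bdev, bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
 *				REQ_OP_READ, GFP_KERNEL);
 *		if (bio_iov_iter_get_pages(bio, iter))
 *			break;
 *		submit_bio(bio);
 *	}
 */
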
static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * result in the bio reference being consumed. The caller must drop the
 * reference on his own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);
	unsigned long hang_check;

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&done,
					hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
void __bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(__bio_advance);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (src_iter.bi_size && dst_iter.bi_size) {
		struct bio_vec src_bv = bio_iter_iovec(src, src_iter);
		struct bio_vec dst_bv = bio_iter_iovec(dst, dst_iter);
		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
		void *src_buf = bvec_kmap_local(&src_bv);
		void *dst_buf = bvec_kmap_local(&dst_bv);

		memcpy(dst_buf, src_buf, bytes);

		kunmap_local(dst_buf);
		kunmap_local(src_buf);

		bio_advance_iter_single(src, &src_iter, bytes);
		bio_advance_iter_single(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain().  The ->bi_end_io() function will only be called the
 *   last time.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	rq_qos_done_bio(bio);

	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
 * neither @bio nor @bs are freed before the split bio.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/* Zone append commands cannot be split */
	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
		return NULL;

	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);
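
/*
 * Illustrative sketch, not part of this file: a stacking driver carving a
 * bounded piece off an oversized bio before remapping it, as md and dm
 * style drivers do.  "max_sectors" and "my_bioset" are hypothetical.
 *
 *	if (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
 *					      &my_bioset);
 *
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);		(requeue the remainder)
 *		bio = split;
 *	}
 */
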
/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 *
 * This function is typically used for bios that are cloned and submitted
 * to the underlying device in parts.
 */
void bio_trim(struct bio *bio, sector_t offset, sector_t size)
{
	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
			 offset + size > bio_sectors(bio)))
		return;

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	bio_alloc_cache_destroy(bs);
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	bioset_integrity_free(bs);
	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *		and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
 *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
 *    to dispatch queued requests when the mempool runs out of space.
 *
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	bs->front_pad = front_pad;
	if (flags & BIOSET_NEED_BVECS)
		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	else
		bs->back_pad = 0;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(bs);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (flags & BIOSET_NEED_RESCUER) {
		bs->rescue_workqueue = alloc_workqueue("bioset",
							WQ_MEM_RECLAIM, 0);
		if (!bs->rescue_workqueue)
			goto bad;
	}
	if (flags & BIOSET_PERCPU_CACHE) {
		bs->cache = alloc_percpu(struct bio_alloc_cache);
		if (!bs->cache)
			goto bad;
		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	}

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);
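
/*
 * Illustrative sketch, not part of this file: a driver-private bio_set
 * whose front_pad lets a clone live inside a driver structure, with the
 * bio as the last member as required above.  "struct my_clone" and
 * "my_bioset" are hypothetical.
 *
 *	struct my_clone {
 *		void *private;
 *		struct bio bio;
 *	};
 *
 *	ret = bioset_init(&my_bioset, BIO_POOL_SIZE,
 *			  offsetof(struct my_clone, bio), BIOSET_NEED_BVECS);
 *
 * A bio allocated from my_bioset can then be container_of()'d back to the
 * surrounding struct my_clone.
 */
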
static int __init init_bio(void)
{
	int i;

	bio_integrity_init();

	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
		struct biovec_slab *bvs = bvec_slabs + i;

		bvs->slab = kmem_cache_create(bvs->name,
				bvs->nr_vecs * sizeof(struct bio_vec), 0,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	}

	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
					bio_cpu_dead);

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);