// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

struct bio_alloc_cache {
	struct bio_list free_list;
	unsigned int nr;
};

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations. If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	bio->bi_next = NULL;
	bio->bi_bdev = NULL;
	bio->bi_opf = 0;
	bio->bi_flags = 0;
	bio->bi_ioprio = 0;
	bio->bi_write_hint = 0;
	bio->bi_status = 0;
	bio->bi_iter.bi_sector = 0;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_idx = 0;
	bio->bi_iter.bi_bvec_done = 0;
	bio->bi_end_io = NULL;
	bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
	bio->bi_blkg = NULL;
	bio->bi_issue.value = 0;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	bio->bi_iocost_cost = 0;
#endif
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	bio->bi_crypt_context = NULL;
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	bio->bi_integrity = NULL;
#endif
	bio->bi_vcnt = 0;

	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_max_vecs = max_vecs;
	bio->bi_io_vec = table;
	bio->bi_pool = NULL;
}
EXPORT_SYMBOL(bio_init);

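/*
 * Usage sketch (illustrative addition, not part of the original bio.c):
 * callers that embed or stack-allocate a bio pair bio_init() with
 * bio_uninit(). The helper below is hypothetical; only the bio_* calls are
 * real kernel APIs.
 */
#if 0
static int example_sync_read_one_page(struct block_device *bdev,
				      struct page *page, sector_t sector)
{
	struct bio bio;
	struct bio_vec bvec;
	int ret;

	bio_init(&bio, &bvec, 1);		/* one caller-provided bio_vec */
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = sector;
	bio_add_page(&bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(&bio);		/* synchronous submission */
	bio_uninit(&bio);			/* must pair with bio_init() */
	return ret;
}
#endif
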
/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);

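/*
 * Usage sketch (illustrative addition, not part of the original bio.c): a
 * driver that must issue a request in two pieces can chain the first piece
 * to the second, so the parent's ->bi_end_io runs only after both complete.
 * The half-way split point below is arbitrary and the helper hypothetical.
 */
#if 0
static void example_submit_in_two_pieces(struct bio *parent,
					 struct bio_set *bs)
{
	struct bio *front = bio_split(parent, bio_sectors(parent) / 2,
				      GFP_NOIO, bs);

	if (!front)
		return;			/* handle allocation failure */
	bio_chain(front, parent);	/* parent completes after both */
	submit_bio(front);
	submit_bio(parent);
}
#endif
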
static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:	the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio. This is due to the mempool guarantees. To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have
 * deadlock avoidance code that resubmits any blocked bios from a rescuer
 * thread.
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_iovecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
		return NULL;

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;

	bio = p + bs->front_pad;
	if (nr_iovecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bvl, nr_iovecs);
	} else if (nr_iovecs) {
		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
	} else {
		bio_init(bio, NULL, 0);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

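/*
 * Usage sketch (illustrative addition, not part of the original bio.c): a
 * typical filesystem-style caller allocates from fs_bio_set, fills the bio,
 * and submits it asynchronously. The helper and its end_io callback are
 * hypothetical; GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the mempool
 * guarantees the allocation succeeds.
 */
#if 0
static void example_write_one_page(struct block_device *bdev,
				   struct page *page, sector_t sector,
				   bio_end_io_t *end_io, void *private)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &fs_bio_set);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE;
	bio->bi_end_io = end_io;
	bio->bi_private = private;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);	/* freed by bio_put() in the end_io path */
}
#endif
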
/**
 * bio_kmalloc - kmalloc a bio for I/O
 * @gfp_mask:	the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 *
 * Use kmalloc to allocate and initialize a bio.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
	struct bio *bio;

	if (nr_iovecs > UIO_MAXIOV)
		return NULL;

	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
	if (unlikely(!bio))
		return NULL;
	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
	bio->bi_pool = NULL;
	return bio;
}
EXPORT_SYMBOL(bio_kmalloc);

void zero_fill_bio(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_truncate - truncate the bio to the smaller size @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, offset, bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch bvec table here and make it really immutable, since
	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .end_bio() callback.
	 *
	 * It is enough to truncate bio by updating .bi_size since we can make
	 * correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually. Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

#define ALLOC_CACHE_MAX		512
#define ALLOC_CACHE_SLACK	 64

static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				  unsigned int nr)
{
	unsigned int i = 0;
	struct bio *bio;

	while ((bio = bio_list_pop(&cache->free_list)) != NULL) {
		cache->nr--;
		bio_free(bio);
		if (++i == nr)
			break;
	}
}

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct bio_set *bs;

	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
	if (bs->cache) {
		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

		bio_alloc_cache_prune(cache, -1U);
	}
	return 0;
}

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
	int cpu;

	if (!bs->cache)
		return;

	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	for_each_possible_cpu(cpu) {
		struct bio_alloc_cache *cache;

		cache = per_cpu_ptr(bs->cache, cpu);
		bio_alloc_cache_prune(cache, -1U);
	}
	free_percpu(bs->cache);
}

/**
 * bio_put - release a reference to a bio
 * @bio:	bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
		BUG_ON(!atomic_read(&bio->__bi_cnt));
		if (!atomic_dec_and_test(&bio->__bi_cnt))
			return;
	}

	if (bio_flagged(bio, BIO_PERCPU_CACHE)) {
		struct bio_alloc_cache *cache;

		bio_uninit(bio);
		cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
		bio_list_add_head(&cache->free_list, bio);
		if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
		put_cpu();
	} else {
		bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

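/*
 * Usage sketch (illustrative addition, not part of the original bio.c): a
 * caller that needs to look at a bio after submission takes an extra
 * reference first, since the normal completion path drops the submitter's
 * reference. handle_error() is hypothetical.
 */
#if 0
	bio_get(bio);		/* pin @bio across submission */
	submit_bio(bio);
	/* @bio may have completed already; our reference keeps it valid */
	if (bio->bi_status != BLK_STS_OK)
		handle_error();	/* hypothetical error handling */
	bio_put(bio);		/* drop the extra reference */
#endif
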
/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, but also allocates the returned bio.
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	if (bio_integrity(bio) &&
	    bio_integrity_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	return b;

err_put:
	bio_put(b);
	return NULL;
}
EXPORT_SYMBOL(bio_clone_fast);

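/*
 * Usage sketch (illustrative addition, not part of the original bio.c): a
 * stacking driver clones an incoming bio, redirects the clone at a backing
 * device, and completes the original from the clone's end_io. All names
 * other than the bio_* APIs are hypothetical.
 */
#if 0
static void example_remap_bio(struct bio *bio, struct block_device *backing,
			      struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	bio_set_dev(clone, backing);	/* override the cloned ->bi_bdev */
	clone->bi_end_io = example_clone_end_io;	/* hypothetical */
	clone->bi_private = bio;
	submit_bio(clone);
}
#endif
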
const char *bio_devname(struct bio *bio, char *buf)
{
	return bdevname(bio->bi_bdev, buf);
}
EXPORT_SYMBOL(bio_devname);

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio. This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
static bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit. This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned len,
		unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_zone_append_page - attempt to add page to zone-append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
 * for a zone-append request. This can fail for a number of reasons, such as
 * the bio being full, the target block device not being a zoned block device,
 * or other limitations of the target block device. The target block device
 * must allow bio's up to PAGE_SIZE, so it is always possible to add a single
 * page to an empty bio.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	bool same_page = false;

	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return 0;

	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);

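/*
 * Usage sketch (illustrative addition, not part of the original bio.c):
 * building a multi-page bio by adding pages until the bio is full, then
 * submitting and continuing with a fresh one. The page array, count, and
 * surrounding setup are hypothetical.
 */
#if 0
	for (i = 0; i < nr_pages; i++) {
		if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0)) {
			/* bio full: submit it and continue with a new one */
			submit_bio(bio);
			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS,
					       &fs_bio_set);
			/* re-initialize bi_bdev/bi_opf/bi_sector here */
			bio_add_page(bio, pages[i], PAGE_SIZE, 0);
		}
	}
#endif
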
void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	if (bio_flagged(bio, BIO_NO_PAGE_REF))
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(bio_release_pages);

static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	WARN_ON_ONCE(bio->bi_max_vecs);

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = iter->count;
	bio_set_flag(bio, BIO_NO_PAGE_REF);
	bio_set_flag(bio, BIO_CLONED);
}

static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	__bio_iov_bvec_set(bio, iter);
	iov_iter_advance(iter, iter->count);
	return 0;
}

static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
{
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	struct iov_iter i = *iter;

	iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
	__bio_iov_bvec_set(bio, &i);
	iov_iter_advance(iter, i.count);
	return 0;
}

static void bio_put_pages(struct page **pages, size_t size, size_t off)
{
	size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);

	for (i = 0; i < nr; i++)
		put_page(pages[i]);
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	bool same_page = false;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);

		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
			if (same_page)
				put_page(page);
		} else {
			if (WARN_ON_ONCE(bio_full(bio, len))) {
				bio_put_pages(pages + i, left, offset);
				return -EINVAL;
			}
			__bio_add_page(bio, page, len, offset);
		}
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;
	int ret = 0;

	if (WARN_ON_ONCE(!max_append_sectors))
		return 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];
		bool same_page = false;

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_add_hw_page(q, bio, page, len, offset,
				max_append_sectors, &same_page) != len) {
			bio_put_pages(pages + i, left, offset);
			ret = -EINVAL;
			break;
		}
		if (same_page)
			put_page(page);
		offset = 0;
	}

	iov_iter_advance(iter, size - left);
	return ret;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
 * to ensure the bvecs and pages stay referenced until the submitted I/O is
 * completed by a call to ->ki_complete() or returns with an error other than
 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
 * on IO completion. If it isn't, then pages should be released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whichever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 *
 * It's intended for direct IO, so doesn't do PSI tracking, the caller is
 * responsible for setting BIO_WORKINGSET if necessary.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	int ret = 0;

	if (iov_iter_is_bvec(iter)) {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			return bio_iov_bvec_set_append(bio, iter);
		return bio_iov_bvec_set(bio, iter);
	}

	do {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			ret = __bio_iov_append_get_pages(bio, iter);
		else
			ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	/* don't account direct I/O as memory stall */
	bio_clear_flag(bio, BIO_WORKINGSET);
	return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

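/*
 * Usage sketch (illustrative addition, not part of the original bio.c): a
 * fragment from a hypothetical direct-IO read path. @bdev, @pos and @iter
 * are assumed to come from the caller; on completion the pages pinned here
 * are dropped with bio_release_pages() (dirtying them for reads).
 */
#if 0
	struct bio *bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS,
					   &fs_bio_set);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = pos >> 9;
	bio->bi_opf = REQ_OP_READ;

	ret = bio_iov_iter_get_pages(bio, iter);
	if (ret) {
		bio_put(bio);
		return ret;
	}
	submit_bio(bio);
	/* end_io: bio_release_pages(bio, true); bio_put(bio); */
#endif
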
static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume the bio reference. The caller must drop the reference on their own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);
	unsigned long hang_check;

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&done,
					hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);

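/*
 * Usage sketch (illustrative addition, not part of the original bio.c):
 * synchronous IO with submit_bio_wait(). Note the caller still owns the bio
 * reference and must bio_put() it afterwards; @bdev, @sector and @page are
 * assumed from the surrounding (hypothetical) code.
 */
#if 0
	struct bio *bio = bio_alloc_bioset(GFP_KERNEL, 1, &fs_bio_set);
	int err;

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	err = submit_bio_wait(bio);	/* blocks until completion */
	bio_put(bio);			/* reference is not consumed */
#endif
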
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	while (src_iter->bi_size && dst_iter->bi_size) {
		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
		void *src_buf;

		src_buf = bvec_kmap_local(&src_bv);
		memcpy_to_bvec(&dst_bv, src_buf);
		kunmap_local(src_buf);

		bio_advance_iter_single(src, src_iter, bytes);
		bio_advance_iter_single(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages. The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all. So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain(). The ->bi_end_io() function will only be called the
 *   last time.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
		rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);

	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

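/*
 * Usage sketch (illustrative addition, not part of the original bio.c): a
 * minimal ->bi_end_io handler; drivers call bio_endio() on bios they
 * complete, which in turn invokes handlers like this one. struct
 * example_ctx and the handler are hypothetical.
 */
#if 0
struct example_ctx {
	struct completion done;
	int error;
};

static void example_end_io(struct bio *bio)
{
	struct example_ctx *ctx = bio->bi_private;

	if (bio->bi_status)
		ctx->error = blk_status_to_errno(bio->bi_status);
	complete(&ctx->done);
	bio_put(bio);		/* drop the allocation reference */
}
#endif
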
/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
 * neither @bio nor @bs are freed before the split bio.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/* Zone append commands cannot be split */
	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
		return NULL;

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split);

	bio_advance(bio, split->bi_iter.bi_size);

	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
		bio_set_flag(split, BIO_TRACE_COMPLETION);

	return split;
}
EXPORT_SYMBOL(bio_split);

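/*
 * Usage sketch (illustrative addition, not part of the original bio.c):
 * after bio_split(bio, N, ...), the returned bio covers the first N sectors
 * and @bio has been advanced to cover the rest; both share the same bvec
 * table. The WARN_ONs below only illustrate the resulting sizes.
 */
#if 0
	sector_t total = bio_sectors(bio);
	struct bio *front = bio_split(bio, 8, GFP_NOIO, &fs_bio_set);

	WARN_ON(bio_sectors(front) != 8);
	WARN_ON(bio_sectors(bio) != total - 8);
#endif
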
/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 *
 * This function is typically used for bios that are cloned and submitted
 * to the underlying device in parts.
 */
void bio_trim(struct bio *bio, sector_t offset, sector_t size)
{
	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
			 offset + size > bio->bi_iter.bi_size))
		return;

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_advance(bio, offset << 9);
	bio->bi_iter.bi_size = size;

	if (bio_integrity(bio))
		bio_integrity_trim(bio);
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
int biovec_init_pool(mempool_t *pool, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;

	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
}

/*
 * bioset_exit - exit a bioset initialized with bioset_init()
 *
 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
 * kzalloc()).
 */
void bioset_exit(struct bio_set *bs)
{
	bio_alloc_cache_destroy(bs);
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);
	bs->rescue_workqueue = NULL;

	mempool_exit(&bs->bio_pool);
	mempool_exit(&bs->bvec_pool);

	bioset_integrity_free(bs);
	if (bs->bio_slab)
		bio_put_slab(bs);
	bs->bio_slab = NULL;
}
EXPORT_SYMBOL(bioset_exit);

/**
 * bioset_init - Initialize a bio_set
 * @bs:		pool to initialize
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
 *              and %BIOSET_NEED_RESCUER
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
 *    for allocating iovecs. This pool is not needed e.g. for bio_clone_fast().
 *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
 *    to dispatch queued requests when the mempool runs out of space.
 *
 */
int bioset_init(struct bio_set *bs,
		unsigned int pool_size,
		unsigned int front_pad,
		int flags)
{
	bs->front_pad = front_pad;
	if (flags & BIOSET_NEED_BVECS)
		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	else
		bs->back_pad = 0;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(bs);
	if (!bs->bio_slab)
		return -ENOMEM;

	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
		goto bad;

	if ((flags & BIOSET_NEED_BVECS) &&
	    biovec_init_pool(&bs->bvec_pool, pool_size))
		goto bad;

	if (flags & BIOSET_NEED_RESCUER) {
		bs->rescue_workqueue = alloc_workqueue("bioset",
							WQ_MEM_RECLAIM, 0);
		if (!bs->rescue_workqueue)
			goto bad;
	}
	if (flags & BIOSET_PERCPU_CACHE) {
		bs->cache = alloc_percpu(struct bio_alloc_cache);
		if (!bs->cache)
			goto bad;
		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	}

	return 0;
bad:
	bioset_exit(bs);
	return -ENOMEM;
}
EXPORT_SYMBOL(bioset_init);

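/*
 * Usage sketch (illustrative addition, not part of the original bio.c): a
 * driver-private bio_set whose front_pad embeds a per-bio context, recovered
 * with container_of() after allocation. struct example_io and the
 * surrounding fields are hypothetical.
 */
#if 0
struct example_io {
	struct example_dev *dev;
	struct bio bio;		/* must be the last member */
};

	ret = bioset_init(&dev->bio_set, 64,
			  offsetof(struct example_io, bio),
			  BIOSET_NEED_BVECS);
	/* ... */
	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &dev->bio_set);
	io = container_of(bio, struct example_io, bio);
#endif
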
/*
 * Initialize and setup a new bio_set, based on the settings from
 * another bio_set.
 */
int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
{
	int flags;

	flags = 0;
	if (src->bvec_pool.min_nr)
		flags |= BIOSET_NEED_BVECS;
	if (src->rescue_workqueue)
		flags |= BIOSET_NEED_RESCUER;

	return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
}
EXPORT_SYMBOL(bioset_init_from_src);

/**
 * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
 * @kiocb:	kiocb describing the IO
 * @nr_vecs:	number of iovecs to pre-allocate
 * @bs:		bio_set to allocate from
 *
 * Description:
 *    Like @bio_alloc_bioset, but pass in the kiocb. The kiocb is only
 *    used to check if we should dip into the per-cpu bio_set allocation
 *    cache. The allocation uses GFP_KERNEL internally. On return, the
 *    bio is marked BIO_PERCPU_CACHE, and the final put of the bio
 *    MUST be done from process context, not hard/soft IRQ.
 *
 */
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
			    struct bio_set *bs)
{
	struct bio_alloc_cache *cache;
	struct bio *bio;

	if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
		return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);

	cache = per_cpu_ptr(bs->cache, get_cpu());
	bio = bio_list_pop(&cache->free_list);
	if (bio) {
		cache->nr--;
		put_cpu();
		bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
		bio->bi_pool = bs;
		bio_set_flag(bio, BIO_PERCPU_CACHE);
		return bio;
	}
	put_cpu();
	bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
	bio_set_flag(bio, BIO_PERCPU_CACHE);
	return bio;
}
EXPORT_SYMBOL_GPL(bio_alloc_kiocb);

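/*
 * Usage sketch (illustrative addition, not part of the original bio.c): a
 * direct-IO fast path allocates through the per-cpu cache when the kiocb
 * allows it (io_uring sets IOCB_ALLOC_CACHE); the bio_set must have been
 * initialized with BIOSET_PERCPU_CACHE. The pool name is hypothetical.
 */
#if 0
	struct bio *bio = bio_alloc_kiocb(iocb, nr_vecs, &example_dio_pool);
	/* final bio_put() must happen in process context */
#endif
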
static int __init init_bio(void)
{
	int i;

	bio_integrity_init();

	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
		struct biovec_slab *bvs = bvec_slabs + i;

		bvs->slab = kmem_cache_create(bvs->name,
				bvs->nr_vecs * sizeof(struct bio_vec), 0,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
	}

	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
					bio_cpu_dead);

	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);