[linux-block.git] / block / bio.c
8c16567d 1// SPDX-License-Identifier: GPL-2.0
1da177e4 2/*
0fe23479 3 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
1da177e4
LT
4 */
5#include <linux/mm.h>
6#include <linux/swap.h>
7#include <linux/bio.h>
8#include <linux/blkdev.h>
a27bb332 9#include <linux/uio.h>
852c788f 10#include <linux/iocontext.h>
1da177e4
LT
11#include <linux/slab.h>
12#include <linux/init.h>
13#include <linux/kernel.h>
630d9c47 14#include <linux/export.h>
1da177e4
LT
15#include <linux/mempool.h>
16#include <linux/workqueue.h>
852c788f 17#include <linux/cgroup.h>
08e18eab 18#include <linux/blk-cgroup.h>
b4c5875d 19#include <linux/highmem.h>
de6a78b6 20#include <linux/sched/sysctl.h>
a892c8d5 21#include <linux/blk-crypto.h>
49d1ec85 22#include <linux/xarray.h>
1da177e4 23
55782138 24#include <trace/events/block.h>
9e234eea 25#include "blk.h"
67b42d0b 26#include "blk-rq-qos.h"
0bfc2455 27
be4d234d
JA
28struct bio_alloc_cache {
29 struct bio_list free_list;
30 unsigned int nr;
31};
32
de76fd89 33static struct biovec_slab {
6ac0b715
CH
34 int nr_vecs;
35 char *name;
36 struct kmem_cache *slab;
de76fd89
CH
37} bvec_slabs[] __read_mostly = {
38 { .nr_vecs = 16, .name = "biovec-16" },
39 { .nr_vecs = 64, .name = "biovec-64" },
40 { .nr_vecs = 128, .name = "biovec-128" },
a8affc03 41 { .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
1da177e4 42};
6ac0b715 43
7a800a20
CH
44static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
45{
46 switch (nr_vecs) {
47 /* smaller bios use inline vecs */
48 case 5 ... 16:
49 return &bvec_slabs[0];
50 case 17 ... 64:
51 return &bvec_slabs[1];
52 case 65 ... 128:
53 return &bvec_slabs[2];
a8affc03 54 case 129 ... BIO_MAX_VECS:
7a800a20
CH
55 return &bvec_slabs[3];
56 default:
57 BUG();
58 return NULL;
59 }
60}
1da177e4 61
1da177e4
LT
62/*
63 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
64 * IO code that does not need private memory pools.
65 */
f4f8154a 66struct bio_set fs_bio_set;
3f86a82a 67EXPORT_SYMBOL(fs_bio_set);
1da177e4 68
bb799ca0
JA
69/*
70 * Our slab pool management
71 */
72struct bio_slab {
73 struct kmem_cache *slab;
74 unsigned int slab_ref;
75 unsigned int slab_size;
76 char name[8];
77};
78static DEFINE_MUTEX(bio_slab_lock);
49d1ec85 79static DEFINE_XARRAY(bio_slabs);
bb799ca0 80
49d1ec85 81static struct bio_slab *create_bio_slab(unsigned int size)
bb799ca0 82{
49d1ec85 83 struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
bb799ca0 84
49d1ec85
ML
85 if (!bslab)
86 return NULL;
bb799ca0 87
49d1ec85
ML
88 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
89 bslab->slab = kmem_cache_create(bslab->name, size,
90 ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
91 if (!bslab->slab)
92 goto fail_alloc_slab;
bb799ca0 93
49d1ec85
ML
94 bslab->slab_ref = 1;
95 bslab->slab_size = size;
bb799ca0 96
49d1ec85
ML
97 if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
98 return bslab;
bb799ca0 99
49d1ec85 100 kmem_cache_destroy(bslab->slab);
bb799ca0 101
49d1ec85
ML
102fail_alloc_slab:
103 kfree(bslab);
104 return NULL;
105}
bb799ca0 106
49d1ec85
ML
107static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
108{
9f180e31 109 return bs->front_pad + sizeof(struct bio) + bs->back_pad;
49d1ec85 110}
bb799ca0 111
49d1ec85
ML
112static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
113{
114 unsigned int size = bs_bio_slab_size(bs);
115 struct bio_slab *bslab;
bb799ca0 116
49d1ec85
ML
117 mutex_lock(&bio_slab_lock);
118 bslab = xa_load(&bio_slabs, size);
119 if (bslab)
120 bslab->slab_ref++;
121 else
122 bslab = create_bio_slab(size);
bb799ca0 123 mutex_unlock(&bio_slab_lock);
49d1ec85
ML
124
125 if (bslab)
126 return bslab->slab;
127 return NULL;
bb799ca0
JA
128}
129
130static void bio_put_slab(struct bio_set *bs)
131{
132 struct bio_slab *bslab = NULL;
49d1ec85 133 unsigned int slab_size = bs_bio_slab_size(bs);
bb799ca0
JA
134
135 mutex_lock(&bio_slab_lock);
136
49d1ec85 137 bslab = xa_load(&bio_slabs, slab_size);
bb799ca0
JA
138 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
139 goto out;
140
49d1ec85
ML
141 WARN_ON_ONCE(bslab->slab != bs->bio_slab);
142
bb799ca0
JA
143 WARN_ON(!bslab->slab_ref);
144
145 if (--bslab->slab_ref)
146 goto out;
147
49d1ec85
ML
148 xa_erase(&bio_slabs, slab_size);
149
bb799ca0 150 kmem_cache_destroy(bslab->slab);
49d1ec85 151 kfree(bslab);
bb799ca0
JA
152
153out:
154 mutex_unlock(&bio_slab_lock);
155}
156
7a800a20 157void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
7ba1ba12 158{
a8affc03 159 BIO_BUG_ON(nr_vecs > BIO_MAX_VECS);
ed996a52 160
a8affc03 161 if (nr_vecs == BIO_MAX_VECS)
9f060e22 162 mempool_free(bv, pool);
7a800a20
CH
163 else if (nr_vecs > BIO_INLINE_VECS)
164 kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
bb799ca0 165}
bb799ca0 166
f2c3eb9b
CH
167/*
168 * Make the first allocation restricted and don't dump info on allocation
169 * failures, since we'll fall back to the mempool in case of failure.
170 */
171static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
172{
173 return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
174 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
bb799ca0
JA
175}
176
7a800a20
CH
177struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
178 gfp_t gfp_mask)
1da177e4 179{
7a800a20 180 struct biovec_slab *bvs = biovec_slab(*nr_vecs);
1da177e4 181
7a800a20 182 if (WARN_ON_ONCE(!bvs))
7ff9345f 183 return NULL;
7ff9345f
JA
184
185 /*
7a800a20
CH
186 * Upgrade the nr_vecs request to take full advantage of the allocation.
187 * We also rely on this in the bvec_free path.
7ff9345f 188 */
7a800a20 189 *nr_vecs = bvs->nr_vecs;
7ff9345f 190
7ff9345f 191 /*
f007a3d6
CH
192 * Try a slab allocation first for all smaller allocations. If that
193 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
a8affc03 194 * The mempool is sized to handle up to BIO_MAX_VECS entries.
7ff9345f 195 */
a8affc03 196 if (*nr_vecs < BIO_MAX_VECS) {
f007a3d6 197 struct bio_vec *bvl;
1da177e4 198
f2c3eb9b 199 bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
7a800a20 200 if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
f007a3d6 201 return bvl;
a8affc03 202 *nr_vecs = BIO_MAX_VECS;
7ff9345f
JA
203 }
204
f007a3d6 205 return mempool_alloc(pool, gfp_mask);
1da177e4
LT
206}
207
9ae3b3f5 208void bio_uninit(struct bio *bio)
1da177e4 209{
db9819c7
CH
210#ifdef CONFIG_BLK_CGROUP
211 if (bio->bi_blkg) {
212 blkg_put(bio->bi_blkg);
213 bio->bi_blkg = NULL;
214 }
215#endif
ece841ab
JT
216 if (bio_integrity(bio))
217 bio_integrity_free(bio);
a892c8d5
ST
218
219 bio_crypt_free_ctx(bio);
4254bba1 220}
9ae3b3f5 221EXPORT_SYMBOL(bio_uninit);
7ba1ba12 222
4254bba1
KO
223static void bio_free(struct bio *bio)
224{
225 struct bio_set *bs = bio->bi_pool;
226 void *p;
227
9ae3b3f5 228 bio_uninit(bio);
4254bba1
KO
229
230 if (bs) {
7a800a20 231 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
4254bba1
KO
232
233 /*
234 * If we have front padding, adjust the bio pointer before freeing
235 */
236 p = bio;
bb799ca0
JA
237 p -= bs->front_pad;
238
8aa6ba2f 239 mempool_free(p, &bs->bio_pool);
4254bba1
KO
240 } else {
241 /* Bio was allocated by bio_kmalloc() */
242 kfree(bio);
243 }
3676347a
PO
244}
245
9ae3b3f5
JA
246/*
247 * Users of this function have their own bio allocation. Subsequently,
248 * they must remember to pair any call to bio_init() with bio_uninit()
249 * when IO has completed, or when the bio is released.
250 */
3a83f467
ML
251void bio_init(struct bio *bio, struct bio_vec *table,
252 unsigned short max_vecs)
1da177e4 253{
da521626
JA
254 bio->bi_next = NULL;
255 bio->bi_bdev = NULL;
256 bio->bi_opf = 0;
257 bio->bi_flags = 0;
258 bio->bi_ioprio = 0;
259 bio->bi_write_hint = 0;
260 bio->bi_status = 0;
261 bio->bi_iter.bi_sector = 0;
262 bio->bi_iter.bi_size = 0;
263 bio->bi_iter.bi_idx = 0;
264 bio->bi_iter.bi_bvec_done = 0;
265 bio->bi_end_io = NULL;
266 bio->bi_private = NULL;
267#ifdef CONFIG_BLK_CGROUP
268 bio->bi_blkg = NULL;
269 bio->bi_issue.value = 0;
270#ifdef CONFIG_BLK_CGROUP_IOCOST
271 bio->bi_iocost_cost = 0;
272#endif
273#endif
274#ifdef CONFIG_BLK_INLINE_ENCRYPTION
275 bio->bi_crypt_context = NULL;
276#endif
277#ifdef CONFIG_BLK_DEV_INTEGRITY
278 bio->bi_integrity = NULL;
279#endif
280 bio->bi_vcnt = 0;
281
c4cf5261 282 atomic_set(&bio->__bi_remaining, 1);
dac56212 283 atomic_set(&bio->__bi_cnt, 1);
3a83f467 284
3a83f467 285 bio->bi_max_vecs = max_vecs;
da521626
JA
286 bio->bi_io_vec = table;
287 bio->bi_pool = NULL;
1da177e4 288}
a112a71d 289EXPORT_SYMBOL(bio_init);
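/*
 * Illustrative sketch (not part of the kernel, guarded out): a driver that
 * embeds a bio plus one inline bio_vec in its own structure and pairs
 * bio_init() with bio_uninit() as the comment above requires.  All names
 * below are hypothetical.
 */
#if 0
struct my_request {
	struct bio_vec	inline_vec;
	struct bio	bio;
};

static void my_request_init(struct my_request *req, struct block_device *bdev,
			    sector_t sector)
{
	bio_init(&req->bio, &req->inline_vec, 1);
	bio_set_dev(&req->bio, bdev);
	req->bio.bi_opf = REQ_OP_READ;
	req->bio.bi_iter.bi_sector = sector;
}

static void my_request_exit(struct my_request *req)
{
	/* paired with the bio_init() above, once I/O has completed */
	bio_uninit(&req->bio);
}
#endif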
1da177e4 290
f44b48c7
KO
291/**
292 * bio_reset - reinitialize a bio
293 * @bio: bio to reset
294 *
295 * Description:
296 * After calling bio_reset(), @bio will be in the same state as a freshly
297 * allocated bio returned by bio_alloc_bioset() - the only fields that are
298 * preserved are the ones that are initialized by bio_alloc_bioset(). See
299 * comment in struct bio.
300 */
301void bio_reset(struct bio *bio)
302{
9ae3b3f5 303 bio_uninit(bio);
f44b48c7 304 memset(bio, 0, BIO_RESET_BYTES);
c4cf5261 305 atomic_set(&bio->__bi_remaining, 1);
f44b48c7
KO
306}
307EXPORT_SYMBOL(bio_reset);
308
38f8baae 309static struct bio *__bio_chain_endio(struct bio *bio)
196d38bc 310{
4246a0b6
CH
311 struct bio *parent = bio->bi_private;
312
3edf5346 313 if (bio->bi_status && !parent->bi_status)
4e4cbee9 314 parent->bi_status = bio->bi_status;
196d38bc 315 bio_put(bio);
38f8baae
CH
316 return parent;
317}
318
319static void bio_chain_endio(struct bio *bio)
320{
321 bio_endio(__bio_chain_endio(bio));
196d38bc
KO
322}
323
324/**
325 * bio_chain - chain bio completions
1051a902 326 * @bio: the target bio
5b874af6 327 * @parent: the parent bio of @bio
196d38bc
KO
328 *
329 * The caller won't have a bi_end_io called when @bio completes - instead,
330 * @parent's bi_end_io won't be called until both @parent and @bio have
331 * completed; the chained bio will also be freed when it completes.
332 *
333 * The caller must not set bi_private or bi_end_io in @bio.
334 */
335void bio_chain(struct bio *bio, struct bio *parent)
336{
337 BUG_ON(bio->bi_private || bio->bi_end_io);
338
339 bio->bi_private = parent;
340 bio->bi_end_io = bio_chain_endio;
c4cf5261 341 bio_inc_remaining(parent);
196d38bc
KO
342}
343EXPORT_SYMBOL(bio_chain);
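/*
 * Illustrative sketch (guarded out, hypothetical helper): issue an empty
 * flush that is chained to @parent, so @parent's bi_end_io only runs once
 * both the flush and @parent itself have completed.  Note that bi_private
 * and bi_end_io of the chained bio are left for bio_chain() to set.
 */
#if 0
static void my_issue_chained_flush(struct bio *parent)
{
	struct bio *flush = bio_alloc(GFP_NOIO, 0);

	bio_set_dev(flush, parent->bi_bdev);
	flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
	bio_chain(flush, parent);
	submit_bio(flush);
}
#endif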
344
df2cb6da
KO
345static void bio_alloc_rescue(struct work_struct *work)
346{
347 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
348 struct bio *bio;
349
350 while (1) {
351 spin_lock(&bs->rescue_lock);
352 bio = bio_list_pop(&bs->rescue_list);
353 spin_unlock(&bs->rescue_lock);
354
355 if (!bio)
356 break;
357
ed00aabd 358 submit_bio_noacct(bio);
df2cb6da
KO
359 }
360}
361
362static void punt_bios_to_rescuer(struct bio_set *bs)
363{
364 struct bio_list punt, nopunt;
365 struct bio *bio;
366
47e0fb46
N
367 if (WARN_ON_ONCE(!bs->rescue_workqueue))
368 return;
df2cb6da
KO
369 /*
370 * In order to guarantee forward progress we must punt only bios that
371 * were allocated from this bio_set; otherwise, if there was a bio on
372 * there for a stacking driver higher up in the stack, processing it
373 * could require allocating bios from this bio_set, and doing that from
374 * our own rescuer would be bad.
375 *
376 * Since bio lists are singly linked, pop them all instead of trying to
377 * remove from the middle of the list:
378 */
379
380 bio_list_init(&punt);
381 bio_list_init(&nopunt);
382
f5fe1b51 383 while ((bio = bio_list_pop(&current->bio_list[0])))
df2cb6da 384 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
f5fe1b51 385 current->bio_list[0] = nopunt;
df2cb6da 386
f5fe1b51
N
387 bio_list_init(&nopunt);
388 while ((bio = bio_list_pop(&current->bio_list[1])))
389 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
390 current->bio_list[1] = nopunt;
df2cb6da
KO
391
392 spin_lock(&bs->rescue_lock);
393 bio_list_merge(&bs->rescue_list, &punt);
394 spin_unlock(&bs->rescue_lock);
395
396 queue_work(bs->rescue_workqueue, &bs->rescue_work);
397}
398
1da177e4
LT
399/**
400 * bio_alloc_bioset - allocate a bio for I/O
519c8e9f 401 * @gfp_mask: the GFP_* mask given to the slab allocator
1da177e4 402 * @nr_iovecs: number of iovecs to pre-allocate
db18efac 403 * @bs: the bio_set to allocate from.
1da177e4 404 *
3175199a 405 * Allocate a bio from the mempools in @bs.
3f86a82a 406 *
3175199a
CH
407 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
408 * allocate a bio. This is due to the mempool guarantees. To make this work,
409 * callers must never allocate more than 1 bio at a time from the general pool.
410 * Callers that need to allocate more than 1 bio must always submit the
411 * previously allocated bio for IO before attempting to allocate a new one.
412 * Failure to do so can cause deadlocks under memory pressure.
3f86a82a 413 *
3175199a
CH
414 * Note that when running under submit_bio_noacct() (i.e. any block driver),
415 * bios are not submitted until after you return - see the code in
416 * submit_bio_noacct() that converts recursion into iteration, to prevent
417 * stack overflows.
df2cb6da 418 *
3175199a
CH
419 * This would normally mean allocating multiple bios under submit_bio_noacct()
420 * would be susceptible to deadlocks, but we have
421 * deadlock avoidance code that resubmits any blocked bios from a rescuer
422 * thread.
df2cb6da 423 *
3175199a
CH
424 * However, we do not guarantee forward progress for allocations from other
425 * mempools. Doing multiple allocations from the same mempool under
426 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
427 * for per bio allocations.
df2cb6da 428 *
3175199a 429 * Returns: Pointer to new bio on success, NULL on failure.
3f86a82a 430 */
0f2e6ab8 431struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
7a88fa19 432 struct bio_set *bs)
1da177e4 433{
df2cb6da 434 gfp_t saved_gfp = gfp_mask;
451a9ebf
TH
435 struct bio *bio;
436 void *p;
437
3175199a
CH
438 /* should not use nobvec bioset for nr_iovecs > 0 */
439 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
440 return NULL;
df2cb6da 441
3175199a
CH
442 /*
443 * submit_bio_noacct() converts recursion to iteration; this means if
444 * we're running beneath it, any bios we allocate and submit will not be
445 * submitted (and thus freed) until after we return.
446 *
447 * This exposes us to a potential deadlock if we allocate multiple bios
448 * from the same bio_set() while running underneath submit_bio_noacct().
449 * If we were to allocate multiple bios (say a stacking block driver
450 * that was splitting bios), we would deadlock if we exhausted the
451 * mempool's reserve.
452 *
453 * We solve this, and guarantee forward progress, with a rescuer
454 * workqueue per bio_set. If we go to allocate and there are bios on
455 * current->bio_list, we first try the allocation without
456 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
457 * blocking to the rescuer workqueue before we retry with the original
458 * gfp_flags.
459 */
460 if (current->bio_list &&
461 (!bio_list_empty(&current->bio_list[0]) ||
462 !bio_list_empty(&current->bio_list[1])) &&
463 bs->rescue_workqueue)
464 gfp_mask &= ~__GFP_DIRECT_RECLAIM;
465
466 p = mempool_alloc(&bs->bio_pool, gfp_mask);
467 if (!p && gfp_mask != saved_gfp) {
468 punt_bios_to_rescuer(bs);
469 gfp_mask = saved_gfp;
8aa6ba2f 470 p = mempool_alloc(&bs->bio_pool, gfp_mask);
3f86a82a 471 }
451a9ebf
TH
472 if (unlikely(!p))
473 return NULL;
1da177e4 474
3175199a
CH
475 bio = p + bs->front_pad;
476 if (nr_iovecs > BIO_INLINE_VECS) {
3175199a 477 struct bio_vec *bvl = NULL;
34053979 478
7a800a20 479 bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
df2cb6da
KO
480 if (!bvl && gfp_mask != saved_gfp) {
481 punt_bios_to_rescuer(bs);
482 gfp_mask = saved_gfp;
7a800a20 483 bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
df2cb6da 484 }
34053979
IM
485 if (unlikely(!bvl))
486 goto err_free;
a38352e0 487
7a800a20 488 bio_init(bio, bvl, nr_iovecs);
3f86a82a 489 } else if (nr_iovecs) {
3175199a
CH
490 bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
491 } else {
492 bio_init(bio, NULL, 0);
1da177e4 493 }
3f86a82a
KO
494
495 bio->bi_pool = bs;
1da177e4 496 return bio;
34053979
IM
497
498err_free:
8aa6ba2f 499 mempool_free(p, &bs->bio_pool);
34053979 500 return NULL;
1da177e4 501}
a112a71d 502EXPORT_SYMBOL(bio_alloc_bioset);
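/*
 * Illustrative sketch (guarded out, hypothetical names): allocate a
 * single-segment write from a private bio_set.  With __GFP_DIRECT_RECLAIM
 * set (as in GFP_NOIO) the mempool guarantees the allocation, provided the
 * caller obeys the one-bio-at-a-time rule described above.
 */
#if 0
static void my_write_end_io(struct bio *bio)
{
	/* ... inspect bio->bi_status ... */
	bio_put(bio);
}

static void my_write_page(struct bio_set *bs, struct block_device *bdev,
			  sector_t sector, struct page *page)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE;
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = my_write_end_io;
	submit_bio(bio);
}
#endif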
1da177e4 503
3175199a
CH
504/**
505 * bio_kmalloc - kmalloc a bio for I/O
506 * @gfp_mask: the GFP_* mask given to the slab allocator
507 * @nr_iovecs: number of iovecs to pre-allocate
508 *
509 * Use kmalloc to allocate and initialize a bio.
510 *
511 * Returns: Pointer to new bio on success, NULL on failure.
512 */
0f2e6ab8 513struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
3175199a
CH
514{
515 struct bio *bio;
516
517 if (nr_iovecs > UIO_MAXIOV)
518 return NULL;
519
520 bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
521 if (unlikely(!bio))
522 return NULL;
523 bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
524 bio->bi_pool = NULL;
525 return bio;
526}
527EXPORT_SYMBOL(bio_kmalloc);
528
6f822e1b 529void zero_fill_bio(struct bio *bio)
1da177e4 530{
7988613b
KO
531 struct bio_vec bv;
532 struct bvec_iter iter;
1da177e4 533
ab6c340e
CH
534 bio_for_each_segment(bv, bio, iter)
535 memzero_bvec(&bv);
1da177e4 536}
6f822e1b 537EXPORT_SYMBOL(zero_fill_bio);
1da177e4 538
83c9c547
ML
539/**
540 * bio_truncate - truncate the bio to the smaller size @new_size
541 * @bio: the bio to be truncated
542 * @new_size: new size for truncating the bio
543 *
544 * Description:
545 * Truncate the bio to the new size @new_size. If bio_op(bio) is
546 * REQ_OP_READ, zero the truncated part. This function should only
547 * be used for handling corner cases, such as bio eod.
548 */
85a8ce62
ML
549void bio_truncate(struct bio *bio, unsigned new_size)
550{
551 struct bio_vec bv;
552 struct bvec_iter iter;
553 unsigned int done = 0;
554 bool truncated = false;
555
556 if (new_size >= bio->bi_iter.bi_size)
557 return;
558
83c9c547 559 if (bio_op(bio) != REQ_OP_READ)
85a8ce62
ML
560 goto exit;
561
562 bio_for_each_segment(bv, bio, iter) {
563 if (done + bv.bv_len > new_size) {
564 unsigned offset;
565
566 if (!truncated)
567 offset = new_size - done;
568 else
569 offset = 0;
570 zero_user(bv.bv_page, offset, bv.bv_len - offset);
571 truncated = true;
572 }
573 done += bv.bv_len;
574 }
575
576 exit:
577 /*
578 * Don't touch bvec table here and make it really immutable, since
579 * fs bio user has to retrieve all pages via bio_for_each_segment_all
580 * in its .end_bio() callback.
581 *
582 * It is enough to truncate bio by updating .bi_size since we can make
583 * correct bvec with the updated .bi_size for drivers.
584 */
585 bio->bi_iter.bi_size = new_size;
586}
587
29125ed6
CH
588/**
589 * guard_bio_eod - truncate a BIO to fit the block device
590 * @bio: bio to truncate
591 *
592 * This allows us to do IO even on the odd last sectors of a device, even if the
593 * block size is some multiple of the physical sector size.
594 *
595 * We'll just truncate the bio to the size of the device, and clear the end of
596 * the buffer head manually. Truly out-of-range accesses will turn into actual
597 * I/O errors, this only handles the "we need to be able to do I/O at the final
598 * sector" case.
599 */
600void guard_bio_eod(struct bio *bio)
601{
309dca30 602 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
29125ed6
CH
603
604 if (!maxsector)
605 return;
606
607 /*
608 * If the *whole* IO is past the end of the device,
609 * let it through, and the IO layer will turn it into
610 * an EIO.
611 */
612 if (unlikely(bio->bi_iter.bi_sector >= maxsector))
613 return;
614
615 maxsector -= bio->bi_iter.bi_sector;
616 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
617 return;
618
619 bio_truncate(bio, maxsector << 9);
620}
621
be4d234d
JA
622#define ALLOC_CACHE_MAX 512
623#define ALLOC_CACHE_SLACK 64
624
625static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
626 unsigned int nr)
627{
628 unsigned int i = 0;
629 struct bio *bio;
630
631 while ((bio = bio_list_pop(&cache->free_list)) != NULL) {
632 cache->nr--;
633 bio_free(bio);
634 if (++i == nr)
635 break;
636 }
637}
638
639static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
640{
641 struct bio_set *bs;
642
643 bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
644 if (bs->cache) {
645 struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
646
647 bio_alloc_cache_prune(cache, -1U);
648 }
649 return 0;
650}
651
652static void bio_alloc_cache_destroy(struct bio_set *bs)
653{
654 int cpu;
655
656 if (!bs->cache)
657 return;
658
659 cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
660 for_each_possible_cpu(cpu) {
661 struct bio_alloc_cache *cache;
662
663 cache = per_cpu_ptr(bs->cache, cpu);
664 bio_alloc_cache_prune(cache, -1U);
665 }
666 free_percpu(bs->cache);
667}
668
1da177e4
LT
669/**
670 * bio_put - release a reference to a bio
671 * @bio: bio to release reference to
672 *
673 * Description:
674 * Put a reference to a &struct bio, either one you have gotten with
9b10f6a9 675 * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
1da177e4
LT
676 **/
677void bio_put(struct bio *bio)
678{
be4d234d 679 if (unlikely(bio_flagged(bio, BIO_REFFED))) {
dac56212 680 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
be4d234d
JA
681 if (!atomic_dec_and_test(&bio->__bi_cnt))
682 return;
683 }
dac56212 684
be4d234d
JA
685 if (bio_flagged(bio, BIO_PERCPU_CACHE)) {
686 struct bio_alloc_cache *cache;
687
688 bio_uninit(bio);
689 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
690 bio_list_add_head(&cache->free_list, bio);
691 if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
692 bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
693 put_cpu();
694 } else {
695 bio_free(bio);
dac56212 696 }
1da177e4 697}
a112a71d 698EXPORT_SYMBOL(bio_put);
1da177e4 699
59d276fe
KO
700/**
701 * __bio_clone_fast - clone a bio that shares the original bio's biovec
702 * @bio: destination bio
703 * @bio_src: bio to clone
704 *
705 * Clone a &bio. Caller will own the returned bio, but not
706 * the actual data it points to. Reference count of returned
707 * bio will be one.
708 *
709 * Caller must ensure that @bio_src is not freed before @bio.
710 */
711void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
712{
7a800a20 713 WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);
59d276fe
KO
714
715 /*
309dca30 716 * most users will be overriding ->bi_bdev with a new target,
59d276fe
KO
717 * so we don't set nor calculate new physical/hw segment counts here
718 */
309dca30 719 bio->bi_bdev = bio_src->bi_bdev;
b7c44ed9 720 bio_set_flag(bio, BIO_CLONED);
111be883
SL
721 if (bio_flagged(bio_src, BIO_THROTTLED))
722 bio_set_flag(bio, BIO_THROTTLED);
46bbf653
CH
723 if (bio_flagged(bio_src, BIO_REMAPPED))
724 bio_set_flag(bio, BIO_REMAPPED);
1eff9d32 725 bio->bi_opf = bio_src->bi_opf;
ca474b73 726 bio->bi_ioprio = bio_src->bi_ioprio;
cb6934f8 727 bio->bi_write_hint = bio_src->bi_write_hint;
59d276fe
KO
728 bio->bi_iter = bio_src->bi_iter;
729 bio->bi_io_vec = bio_src->bi_io_vec;
20bd723e 730
db6638d7 731 bio_clone_blkg_association(bio, bio_src);
e439bedf 732 blkcg_bio_issue_init(bio);
59d276fe
KO
733}
734EXPORT_SYMBOL(__bio_clone_fast);
735
736/**
737 * bio_clone_fast - clone a bio that shares the original bio's biovec
738 * @bio: bio to clone
739 * @gfp_mask: allocation priority
740 * @bs: bio_set to allocate from
741 *
742 * Like __bio_clone_fast, only also allocates the returned bio
743 */
744struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
745{
746 struct bio *b;
747
748 b = bio_alloc_bioset(gfp_mask, 0, bs);
749 if (!b)
750 return NULL;
751
752 __bio_clone_fast(b, bio);
753
07560151
EB
754 if (bio_crypt_clone(b, bio, gfp_mask) < 0)
755 goto err_put;
a892c8d5 756
07560151
EB
757 if (bio_integrity(bio) &&
758 bio_integrity_clone(b, bio, gfp_mask) < 0)
759 goto err_put;
59d276fe
KO
760
761 return b;
07560151
EB
762
763err_put:
764 bio_put(b);
765 return NULL;
59d276fe
KO
766}
767EXPORT_SYMBOL(bio_clone_fast);
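/*
 * Illustrative sketch (guarded out) of the usual stacking-driver pattern:
 * clone the bio, retarget the clone at a lower device, submit it, and
 * complete the original from the clone's end_io.  Names are hypothetical;
 * a real driver must also ensure the original bio is not freed before its
 * clone, since the biovec is shared.
 */
#if 0
static void my_clone_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);
}

static void my_remap_and_submit(struct bio *bio, struct block_device *lower,
				struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (!clone) {
		bio_io_error(bio);
		return;
	}
	bio_set_dev(clone, lower);
	clone->bi_private = bio;
	clone->bi_end_io = my_clone_end_io;
	submit_bio_noacct(clone);
}
#endif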
768
5cbd28e3
CH
769const char *bio_devname(struct bio *bio, char *buf)
770{
309dca30 771 return bdevname(bio->bi_bdev, buf);
5cbd28e3
CH
772}
773EXPORT_SYMBOL(bio_devname);
774
5919482e
ML
775static inline bool page_is_mergeable(const struct bio_vec *bv,
776 struct page *page, unsigned int len, unsigned int off,
ff896738 777 bool *same_page)
5919482e 778{
d8166519
MWO
779 size_t bv_end = bv->bv_offset + bv->bv_len;
780 phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
5919482e
ML
781 phys_addr_t page_addr = page_to_phys(page);
782
783 if (vec_end_addr + 1 != page_addr + off)
784 return false;
785 if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
786 return false;
52d52d1c 787
ff896738 788 *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
d8166519
MWO
789 if (*same_page)
790 return true;
791 return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
5919482e
ML
792}
793
e4581105
CH
794/*
795 * Try to merge a page into a segment, while obeying the hardware segment
796 * size limit. This is not for normal read/write bios, but for passthrough
797 * or Zone Append operations that we can't split.
798 */
799static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
800 struct page *page, unsigned len,
801 unsigned offset, bool *same_page)
489fbbcb 802{
384209cd 803 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
489fbbcb
ML
804 unsigned long mask = queue_segment_boundary(q);
805 phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
806 phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
807
808 if ((addr1 | mask) != (addr2 | mask))
809 return false;
489fbbcb
ML
810 if (bv->bv_len + len > queue_max_segment_size(q))
811 return false;
384209cd 812 return __bio_try_merge_page(bio, page, len, offset, same_page);
489fbbcb
ML
813}
814
1da177e4 815/**
e4581105
CH
816 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
817 * @q: the target queue
818 * @bio: destination bio
819 * @page: page to add
820 * @len: vec entry length
821 * @offset: vec entry offset
822 * @max_sectors: maximum number of sectors that can be added
823 * @same_page: return if the segment has been merged inside the same page
c66a14d0 824 *
e4581105
CH
825 * Add a page to a bio while respecting the hardware max_sectors, max_segment
826 * and gap limitations.
1da177e4 827 */
e4581105 828int bio_add_hw_page(struct request_queue *q, struct bio *bio,
19047087 829 struct page *page, unsigned int len, unsigned int offset,
e4581105 830 unsigned int max_sectors, bool *same_page)
1da177e4 831{
1da177e4
LT
832 struct bio_vec *bvec;
833
e4581105 834 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1da177e4
LT
835 return 0;
836
e4581105 837 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
1da177e4
LT
838 return 0;
839
80cfd548 840 if (bio->bi_vcnt > 0) {
e4581105 841 if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
384209cd 842 return len;
320ea869
CH
843
844 /*
845 * If the queue doesn't support SG gaps and adding this segment
846 * would create a gap, disallow it.
847 */
384209cd 848 bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
320ea869
CH
849 if (bvec_gap_to_prev(q, bvec, offset))
850 return 0;
80cfd548
JA
851 }
852
79d08f89 853 if (bio_full(bio, len))
1da177e4
LT
854 return 0;
855
14ccb66b 856 if (bio->bi_vcnt >= queue_max_segments(q))
489fbbcb
ML
857 return 0;
858
fcbf6a08
ML
859 bvec = &bio->bi_io_vec[bio->bi_vcnt];
860 bvec->bv_page = page;
861 bvec->bv_len = len;
862 bvec->bv_offset = offset;
863 bio->bi_vcnt++;
dcdca753 864 bio->bi_iter.bi_size += len;
1da177e4
LT
865 return len;
866}
19047087 867
e4581105
CH
868/**
869 * bio_add_pc_page - attempt to add page to passthrough bio
870 * @q: the target queue
871 * @bio: destination bio
872 * @page: page to add
873 * @len: vec entry length
874 * @offset: vec entry offset
875 *
876 * Attempt to add a page to the bio_vec maplist. This can fail for a
877 * number of reasons, such as the bio being full or target block device
878 * limitations. The target block device must allow bios up to PAGE_SIZE,
879 * so it is always possible to add a single page to an empty bio.
880 *
881 * This should only be used by passthrough bios.
882 */
19047087
ML
883int bio_add_pc_page(struct request_queue *q, struct bio *bio,
884 struct page *page, unsigned int len, unsigned int offset)
885{
d1916c86 886 bool same_page = false;
e4581105
CH
887 return bio_add_hw_page(q, bio, page, len, offset,
888 queue_max_hw_sectors(q), &same_page);
19047087 889}
a112a71d 890EXPORT_SYMBOL(bio_add_pc_page);
6e68af66 891
ae29333f
JT
892/**
893 * bio_add_zone_append_page - attempt to add page to zone-append bio
894 * @bio: destination bio
895 * @page: page to add
896 * @len: vec entry length
897 * @offset: vec entry offset
898 *
899 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
900 * for a zone-append request. This can fail for a number of reasons, such as the
901 * bio being full, the target block device not being a zoned block device, or
902 * other limitations of the target block device. The target block device must
903 * allow bios up to PAGE_SIZE, so it is always possible to add a single page
904 * to an empty bio.
905 *
906 * Returns: number of bytes added to the bio, or 0 in case of a failure.
907 */
908int bio_add_zone_append_page(struct bio *bio, struct page *page,
909 unsigned int len, unsigned int offset)
910{
582cd91f 911 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
ae29333f
JT
912 bool same_page = false;
913
914 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
915 return 0;
916
917 if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
918 return 0;
919
920 return bio_add_hw_page(q, bio, page, len, offset,
921 queue_max_zone_append_sectors(q), &same_page);
922}
923EXPORT_SYMBOL_GPL(bio_add_zone_append_page);
924
1da177e4 925/**
0aa69fd3
CH
926 * __bio_try_merge_page - try appending data to an existing bvec.
927 * @bio: destination bio
551879a4 928 * @page: start page to add
0aa69fd3 929 * @len: length of the data to add
551879a4 930 * @off: offset of the data relative to @page
ff896738 931 * @same_page: return if the segment has been merged inside the same page
1da177e4 932 *
0aa69fd3 933 * Try to add the data at @page + @off to the last bvec of @bio. This is a
3cf14889 934 * useful optimisation for file systems with a block size smaller than the
0aa69fd3
CH
935 * page size.
936 *
551879a4
ML
937 * Warn if (@len, @off) crosses pages in case that @same_page is true.
938 *
0aa69fd3 939 * Return %true on success or %false on failure.
1da177e4 940 */
0aa69fd3 941bool __bio_try_merge_page(struct bio *bio, struct page *page,
ff896738 942 unsigned int len, unsigned int off, bool *same_page)
1da177e4 943{
c66a14d0 944 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
0aa69fd3 945 return false;
762380ad 946
cc90bc68 947 if (bio->bi_vcnt > 0) {
0aa69fd3 948 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
5919482e
ML
949
950 if (page_is_mergeable(bv, page, len, off, same_page)) {
35c820e7 951 if (bio->bi_iter.bi_size > UINT_MAX - len) {
2cd896a5 952 *same_page = false;
cc90bc68 953 return false;
2cd896a5 954 }
5919482e
ML
955 bv->bv_len += len;
956 bio->bi_iter.bi_size += len;
957 return true;
958 }
c66a14d0 959 }
0aa69fd3
CH
960 return false;
961}
962EXPORT_SYMBOL_GPL(__bio_try_merge_page);
c66a14d0 963
0aa69fd3 964/**
551879a4 965 * __bio_add_page - add page(s) to a bio in a new segment
0aa69fd3 966 * @bio: destination bio
551879a4
ML
967 * @page: start page to add
968 * @len: length of the data to add, may cross pages
969 * @off: offset of the data relative to @page, may cross pages
0aa69fd3
CH
970 *
971 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
972 * that @bio has space for another bvec.
973 */
974void __bio_add_page(struct bio *bio, struct page *page,
975 unsigned int len, unsigned int off)
976{
977 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
c66a14d0 978
0aa69fd3 979 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
79d08f89 980 WARN_ON_ONCE(bio_full(bio, len));
0aa69fd3
CH
981
982 bv->bv_page = page;
983 bv->bv_offset = off;
984 bv->bv_len = len;
c66a14d0 985
c66a14d0 986 bio->bi_iter.bi_size += len;
0aa69fd3 987 bio->bi_vcnt++;
b8e24a93
JW
988
989 if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
990 bio_set_flag(bio, BIO_WORKINGSET);
0aa69fd3
CH
991}
992EXPORT_SYMBOL_GPL(__bio_add_page);
993
994/**
551879a4 995 * bio_add_page - attempt to add page(s) to bio
0aa69fd3 996 * @bio: destination bio
551879a4
ML
997 * @page: start page to add
998 * @len: vec entry length, may cross pages
999 * @offset: vec entry offset relative to @page, may cross pages
0aa69fd3 1000 *
551879a4 1001 * Attempt to add page(s) to the bio_vec maplist. This will only fail
0aa69fd3
CH
1002 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1003 */
1004int bio_add_page(struct bio *bio, struct page *page,
1005 unsigned int len, unsigned int offset)
1006{
ff896738
CH
1007 bool same_page = false;
1008
1009 if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
79d08f89 1010 if (bio_full(bio, len))
0aa69fd3
CH
1011 return 0;
1012 __bio_add_page(bio, page, len, offset);
1013 }
c66a14d0 1014 return len;
1da177e4 1015}
a112a71d 1016EXPORT_SYMBOL(bio_add_page);
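/*
 * Illustrative sketch (guarded out, hypothetical helper): build a write bio
 * from an array of full pages.  Because the bio is allocated with @nr
 * vectors (nr <= BIO_MAX_VECS), bio_add_page() cannot fail here; the check
 * is kept for clarity.
 */
#if 0
static struct bio *my_bio_from_pages(struct block_device *bdev, sector_t sector,
				     struct page **pages, unsigned short nr)
{
	struct bio *bio = bio_alloc(GFP_NOIO, nr);
	unsigned short i;

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE;
	bio->bi_iter.bi_sector = sector;
	for (i = 0; i < nr; i++) {
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
			bio_put(bio);
			return NULL;
		}
	}
	return bio;
}
#endif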
1da177e4 1017
d241a95f 1018void bio_release_pages(struct bio *bio, bool mark_dirty)
7321ecbf
CH
1019{
1020 struct bvec_iter_all iter_all;
1021 struct bio_vec *bvec;
7321ecbf 1022
b2d0d991
CH
1023 if (bio_flagged(bio, BIO_NO_PAGE_REF))
1024 return;
1025
d241a95f
CH
1026 bio_for_each_segment_all(bvec, bio, iter_all) {
1027 if (mark_dirty && !PageCompound(bvec->bv_page))
1028 set_page_dirty_lock(bvec->bv_page);
7321ecbf 1029 put_page(bvec->bv_page);
d241a95f 1030 }
7321ecbf 1031}
29b2a3aa 1032EXPORT_SYMBOL_GPL(bio_release_pages);
7321ecbf 1033
7de55b7d 1034static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
6d0c48ae 1035{
7a800a20 1036 WARN_ON_ONCE(bio->bi_max_vecs);
c42bca92
PB
1037
1038 bio->bi_vcnt = iter->nr_segs;
c42bca92
PB
1039 bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1040 bio->bi_iter.bi_bvec_done = iter->iov_offset;
1041 bio->bi_iter.bi_size = iter->count;
ed97ce5e 1042 bio_set_flag(bio, BIO_NO_PAGE_REF);
977be012 1043 bio_set_flag(bio, BIO_CLONED);
7de55b7d 1044}
c42bca92 1045
7de55b7d
JT
1046static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
1047{
1048 __bio_iov_bvec_set(bio, iter);
c42bca92 1049 iov_iter_advance(iter, iter->count);
a10584c3 1050 return 0;
6d0c48ae
JA
1051}
1052
7de55b7d
JT
1053static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
1054{
1055 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
1056 struct iov_iter i = *iter;
1057
1058 iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
1059 __bio_iov_bvec_set(bio, &i);
1060 iov_iter_advance(iter, i.count);
1061 return 0;
1062}
1063
d9cf3bd5
PB
1064static void bio_put_pages(struct page **pages, size_t size, size_t off)
1065{
1066 size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
1067
1068 for (i = 0; i < nr; i++)
1069 put_page(pages[i]);
1070}
1071
576ed913
CH
1072#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
1073
2cefe4db 1074/**
17d51b10 1075 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
2cefe4db
KO
1076 * @bio: bio to add pages to
1077 * @iter: iov iterator describing the region to be mapped
1078 *
17d51b10 1079 * Pins pages from *iter and appends them to @bio's bvec array. The
2cefe4db 1080 * pages will have to be released using put_page() when done.
17d51b10 1081 * For multi-segment *iter, this function only adds pages from the
3cf14889 1082 * next non-empty segment of the iov iterator.
2cefe4db 1083 */
17d51b10 1084static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
2cefe4db 1085{
576ed913
CH
1086 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1087 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
2cefe4db
KO
1088 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1089 struct page **pages = (struct page **)bv;
45691804 1090 bool same_page = false;
576ed913
CH
1091 ssize_t size, left;
1092 unsigned len, i;
b403ea24 1093 size_t offset;
576ed913
CH
1094
1095 /*
1096 * Move page array up in the allocated memory for the bio vecs as far as
1097 * possible so that we can start filling biovecs from the beginning
1098 * without overwriting the temporary page array.
1099 */
1100 BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1101 pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
2cefe4db 1102
35c820e7 1103 size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
2cefe4db
KO
1104 if (unlikely(size <= 0))
1105 return size ? size : -EFAULT;
2cefe4db 1106
576ed913
CH
1107 for (left = size, i = 0; left > 0; left -= len, i++) {
1108 struct page *page = pages[i];
2cefe4db 1109
576ed913 1110 len = min_t(size_t, PAGE_SIZE - offset, left);
45691804
CH
1111
1112 if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
1113 if (same_page)
1114 put_page(page);
1115 } else {
d9cf3bd5
PB
1116 if (WARN_ON_ONCE(bio_full(bio, len))) {
1117 bio_put_pages(pages + i, left, offset);
1118 return -EINVAL;
1119 }
45691804
CH
1120 __bio_add_page(bio, page, len, offset);
1121 }
576ed913 1122 offset = 0;
2cefe4db
KO
1123 }
1124
2cefe4db
KO
1125 iov_iter_advance(iter, size);
1126 return 0;
1127}
17d51b10 1128
0512a75b
KB
1129static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
1130{
1131 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1132 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
309dca30 1133 struct request_queue *q = bio->bi_bdev->bd_disk->queue;
0512a75b
KB
1134 unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
1135 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1136 struct page **pages = (struct page **)bv;
1137 ssize_t size, left;
1138 unsigned len, i;
1139 size_t offset;
4977d121 1140 int ret = 0;
0512a75b
KB
1141
1142 if (WARN_ON_ONCE(!max_append_sectors))
1143 return 0;
1144
1145 /*
1146 * Move page array up in the allocated memory for the bio vecs as far as
1147 * possible so that we can start filling biovecs from the beginning
1148 * without overwriting the temporary page array.
1149 */
1150 BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1151 pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1152
1153 size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
1154 if (unlikely(size <= 0))
1155 return size ? size : -EFAULT;
1156
1157 for (left = size, i = 0; left > 0; left -= len, i++) {
1158 struct page *page = pages[i];
1159 bool same_page = false;
1160
1161 len = min_t(size_t, PAGE_SIZE - offset, left);
1162 if (bio_add_hw_page(q, bio, page, len, offset,
4977d121 1163 max_append_sectors, &same_page) != len) {
d9cf3bd5 1164 bio_put_pages(pages + i, left, offset);
4977d121
NA
1165 ret = -EINVAL;
1166 break;
1167 }
0512a75b
KB
1168 if (same_page)
1169 put_page(page);
1170 offset = 0;
1171 }
1172
4977d121
NA
1173 iov_iter_advance(iter, size - left);
1174 return ret;
0512a75b
KB
1175}
1176
17d51b10 1177/**
6d0c48ae 1178 * bio_iov_iter_get_pages - add user or kernel pages to a bio
17d51b10 1179 * @bio: bio to add pages to
6d0c48ae
JA
1180 * @iter: iov iterator describing the region to be added
1181 *
1182 * This takes either an iterator pointing to user memory, or one pointing to
1183 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1184 * map them into the kernel. On IO completion, the caller should put those
c42bca92
PB
1185 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1186 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1187 * to ensure the bvecs and pages stay referenced until the submitted I/O is
1188 * completed by a call to ->ki_complete() or returns with an error other than
1189 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1190 * on IO completion. If it isn't, then pages should be released.
17d51b10 1191 *
17d51b10 1192 * The function tries, but does not guarantee, to pin as many pages as
5cd3ddc1 1193 * fit into the bio, or are requested in @iter, whichever is smaller. If
6d0c48ae
JA
1194 * MM encounters an error pinning the requested pages, it stops. Error
1195 * is returned only if 0 pages could be pinned.
0cf41e5e
PB
1196 *
1197 * It's intended for direct IO, so doesn't do PSI tracking, the caller is
1198 * responsible for setting BIO_WORKINGSET if necessary.
17d51b10
MW
1199 */
1200int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1201{
c42bca92 1202 int ret = 0;
14eacf12 1203
c42bca92 1204 if (iov_iter_is_bvec(iter)) {
7de55b7d
JT
1205 if (bio_op(bio) == REQ_OP_ZONE_APPEND)
1206 return bio_iov_bvec_set_append(bio, iter);
ed97ce5e 1207 return bio_iov_bvec_set(bio, iter);
c42bca92 1208 }
17d51b10
MW
1209
1210 do {
86004515 1211 if (bio_op(bio) == REQ_OP_ZONE_APPEND)
0512a75b 1212 ret = __bio_iov_append_get_pages(bio, iter);
86004515
CH
1213 else
1214 ret = __bio_iov_iter_get_pages(bio, iter);
79d08f89 1215 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
17d51b10 1216
0cf41e5e
PB
1217 /* don't account direct I/O as memory stall */
1218 bio_clear_flag(bio, BIO_WORKINGSET);
14eacf12 1219 return bio->bi_vcnt ? 0 : ret;
17d51b10 1220}
29b2a3aa 1221EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
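/*
 * Illustrative sketch (guarded out, hypothetical helper) of the direct-I/O
 * usage pattern: pin the pages described by @iter into @bio and release
 * them again on completion.  bio_release_pages() already skips bios
 * flagged BIO_NO_PAGE_REF, so the same completion path works for both user
 * and bvec iterators.
 */
#if 0
static int my_dio_fill_bio(struct bio *bio, struct iov_iter *iter)
{
	int ret = bio_iov_iter_get_pages(bio, iter);

	if (ret)
		return ret;
	/* ... submit @bio; then, in the end_io handler: */
	/* bio_release_pages(bio, bio_data_dir(bio) == READ); */
	return 0;
}
#endif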
2cefe4db 1222
4246a0b6 1223static void submit_bio_wait_endio(struct bio *bio)
9e882242 1224{
65e53aab 1225 complete(bio->bi_private);
9e882242
KO
1226}
1227
1228/**
1229 * submit_bio_wait - submit a bio, and wait until it completes
9e882242
KO
1230 * @bio: The &struct bio which describes the I/O
1231 *
1232 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1233 * bio_endio() on failure.
3d289d68
JK
1234 *
1235 * WARNING: Unlike how submit_bio() is usually used, this function does not
1236 * consume the bio reference. The caller must drop the reference
1237 * on their own.
9e882242 1238 */
4e49ea4a 1239int submit_bio_wait(struct bio *bio)
9e882242 1240{
309dca30
CH
1241 DECLARE_COMPLETION_ONSTACK_MAP(done,
1242 bio->bi_bdev->bd_disk->lockdep_map);
de6a78b6 1243 unsigned long hang_check;
9e882242 1244
65e53aab 1245 bio->bi_private = &done;
9e882242 1246 bio->bi_end_io = submit_bio_wait_endio;
1eff9d32 1247 bio->bi_opf |= REQ_SYNC;
4e49ea4a 1248 submit_bio(bio);
de6a78b6
ML
1249
1250 /* Prevent hang_check timer from firing at us during very long I/O */
1251 hang_check = sysctl_hung_task_timeout_secs;
1252 if (hang_check)
1253 while (!wait_for_completion_io_timeout(&done,
1254 hang_check * (HZ/2)))
1255 ;
1256 else
1257 wait_for_completion_io(&done);
9e882242 1258
65e53aab 1259 return blk_status_to_errno(bio->bi_status);
9e882242
KO
1260}
1261EXPORT_SYMBOL(submit_bio_wait);
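/*
 * Illustrative sketch (guarded out, hypothetical helper): synchronously
 * read one sector.  Note that submit_bio_wait() does not consume the bio
 * reference, so the caller still has to call bio_put().
 */
#if 0
static int my_read_sector(struct block_device *bdev, sector_t sector,
			  struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, SECTOR_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}
#endif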
1262
054bdf64
KO
1263/**
1264 * bio_advance - increment/complete a bio by some number of bytes
1265 * @bio: bio to advance
1266 * @bytes: number of bytes to complete
1267 *
1268 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
1269 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
1270 * be updated on the last bvec as well.
1271 *
1272 * @bio will then represent the remaining, uncompleted portion of the io.
1273 */
1274void bio_advance(struct bio *bio, unsigned bytes)
1275{
1276 if (bio_integrity(bio))
1277 bio_integrity_advance(bio, bytes);
1278
a892c8d5 1279 bio_crypt_advance(bio, bytes);
4550dd6c 1280 bio_advance_iter(bio, &bio->bi_iter, bytes);
054bdf64
KO
1281}
1282EXPORT_SYMBOL(bio_advance);
1283
45db54d5
KO
1284void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1285 struct bio *src, struct bvec_iter *src_iter)
16ac3d63 1286{
45db54d5 1287 while (src_iter->bi_size && dst_iter->bi_size) {
f8b679a0
CH
1288 struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1289 struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1290 unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1291 void *src_buf;
1292
1293 src_buf = bvec_kmap_local(&src_bv);
1294 memcpy_to_bvec(&dst_bv, src_buf);
1295 kunmap_local(src_buf);
6e6e811d 1296
22b56c29
PB
1297 bio_advance_iter_single(src, src_iter, bytes);
1298 bio_advance_iter_single(dst, dst_iter, bytes);
16ac3d63
KO
1299 }
1300}
38a72dac
KO
1301EXPORT_SYMBOL(bio_copy_data_iter);
1302
1303/**
45db54d5
KO
1304 * bio_copy_data - copy contents of data buffers from one bio to another
1305 * @src: source bio
1306 * @dst: destination bio
38a72dac
KO
1307 *
1308 * Stops when it reaches the end of either @src or @dst - that is, copies
1309 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1310 */
1311void bio_copy_data(struct bio *dst, struct bio *src)
1312{
45db54d5
KO
1313 struct bvec_iter src_iter = src->bi_iter;
1314 struct bvec_iter dst_iter = dst->bi_iter;
1315
1316 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
38a72dac 1317}
16ac3d63
KO
1318EXPORT_SYMBOL(bio_copy_data);
1319
491221f8 1320void bio_free_pages(struct bio *bio)
1dfa0f68
CH
1321{
1322 struct bio_vec *bvec;
6dc4f100 1323 struct bvec_iter_all iter_all;
1dfa0f68 1324
2b070cfe 1325 bio_for_each_segment_all(bvec, bio, iter_all)
1dfa0f68
CH
1326 __free_page(bvec->bv_page);
1327}
491221f8 1328EXPORT_SYMBOL(bio_free_pages);
1dfa0f68 1329
1da177e4
LT
1330/*
1331 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1332 * for performing direct-IO in BIOs.
1333 *
1334 * The problem is that we cannot run set_page_dirty() from interrupt context
1335 * because the required locks are not interrupt-safe. So what we can do is to
1336 * mark the pages dirty _before_ performing IO. And in interrupt context,
1337 * check that the pages are still dirty. If so, fine. If not, redirty them
1338 * in process context.
1339 *
1340 * We special-case compound pages here: normally this means reads into hugetlb
1341 * pages. The logic in here doesn't really work right for compound pages
1342 * because the VM does not uniformly chase down the head page in all cases.
1343 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1344 * handle them at all. So we skip compound pages here at an early stage.
1345 *
1346 * Note that this code is very hard to test under normal circumstances because
1347 * direct-io pins the pages with get_user_pages(). This makes
1348 * is_page_cache_freeable return false, and the VM will not clean the pages.
0d5c3eba 1349 * But other code (eg, flusher threads) could clean the pages if they are mapped
1da177e4
LT
1350 * pagecache.
1351 *
1352 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1353 * deferred bio dirtying paths.
1354 */
1355
1356/*
1357 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1358 */
1359void bio_set_pages_dirty(struct bio *bio)
1360{
cb34e057 1361 struct bio_vec *bvec;
6dc4f100 1362 struct bvec_iter_all iter_all;
1da177e4 1363
2b070cfe 1364 bio_for_each_segment_all(bvec, bio, iter_all) {
3bb50983
CH
1365 if (!PageCompound(bvec->bv_page))
1366 set_page_dirty_lock(bvec->bv_page);
1da177e4
LT
1367 }
1368}
1369
1da177e4
LT
1370/*
1371 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1372 * If they are, then fine. If, however, some pages are clean then they must
1373 * have been written out during the direct-IO read. So we take another ref on
24d5493f 1374 * the BIO and re-dirty the pages in process context.
1da177e4
LT
1375 *
1376 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
ea1754a0
KS
1377 * here on. It will run one put_page() against each page and will run one
1378 * bio_put() against the BIO.
1da177e4
LT
1379 */
1380
65f27f38 1381static void bio_dirty_fn(struct work_struct *work);
1da177e4 1382
65f27f38 1383static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1da177e4
LT
1384static DEFINE_SPINLOCK(bio_dirty_lock);
1385static struct bio *bio_dirty_list;
1386
1387/*
1388 * This runs in process context
1389 */
65f27f38 1390static void bio_dirty_fn(struct work_struct *work)
1da177e4 1391{
24d5493f 1392 struct bio *bio, *next;
1da177e4 1393
24d5493f
CH
1394 spin_lock_irq(&bio_dirty_lock);
1395 next = bio_dirty_list;
1da177e4 1396 bio_dirty_list = NULL;
24d5493f 1397 spin_unlock_irq(&bio_dirty_lock);
1da177e4 1398
24d5493f
CH
1399 while ((bio = next) != NULL) {
1400 next = bio->bi_private;
1da177e4 1401
d241a95f 1402 bio_release_pages(bio, true);
1da177e4 1403 bio_put(bio);
1da177e4
LT
1404 }
1405}
1406
1407void bio_check_pages_dirty(struct bio *bio)
1408{
cb34e057 1409 struct bio_vec *bvec;
24d5493f 1410 unsigned long flags;
6dc4f100 1411 struct bvec_iter_all iter_all;
1da177e4 1412
2b070cfe 1413 bio_for_each_segment_all(bvec, bio, iter_all) {
24d5493f
CH
1414 if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
1415 goto defer;
1da177e4
LT
1416 }
1417
d241a95f 1418 bio_release_pages(bio, false);
24d5493f
CH
1419 bio_put(bio);
1420 return;
1421defer:
1422 spin_lock_irqsave(&bio_dirty_lock, flags);
1423 bio->bi_private = bio_dirty_list;
1424 bio_dirty_list = bio;
1425 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1426 schedule_work(&bio_dirty_work);
1da177e4
LT
1427}
1428
c4cf5261
JA
1429static inline bool bio_remaining_done(struct bio *bio)
1430{
1431 /*
1432 * If we're not chaining, then ->__bi_remaining is always 1 and
1433 * we always end io on the first invocation.
1434 */
1435 if (!bio_flagged(bio, BIO_CHAIN))
1436 return true;
1437
1438 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1439
326e1dbb 1440 if (atomic_dec_and_test(&bio->__bi_remaining)) {
b7c44ed9 1441 bio_clear_flag(bio, BIO_CHAIN);
c4cf5261 1442 return true;
326e1dbb 1443 }
c4cf5261
JA
1444
1445 return false;
1446}
1447
1da177e4
LT
1448/**
1449 * bio_endio - end I/O on a bio
1450 * @bio: bio
1da177e4
LT
1451 *
1452 * Description:
4246a0b6
CH
1453 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1454 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1455 * bio unless they own it and thus know that it has an end_io function.
fbbaf700
N
1456 *
1457 * bio_endio() can be called several times on a bio that has been chained
1458 * using bio_chain(). The ->bi_end_io() function will only be called the
60b6a7e6 1459 * last time.
1da177e4 1460 **/
4246a0b6 1461void bio_endio(struct bio *bio)
1da177e4 1462{
ba8c6967 1463again:
2b885517 1464 if (!bio_remaining_done(bio))
ba8c6967 1465 return;
7c20f116
CH
1466 if (!bio_integrity_endio(bio))
1467 return;
1da177e4 1468
a647a524 1469 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
309dca30 1470 rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
67b42d0b 1471
60b6a7e6
EH
1472 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1473 trace_block_bio_complete(bio->bi_bdev->bd_disk->queue, bio);
1474 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1475 }
1476
ba8c6967
CH
1477 /*
1478 * Need to have a real endio function for chained bios, otherwise
1479 * various corner cases will break (like stacking block devices that
1480 * save/restore bi_end_io) - however, we want to avoid unbounded
1481 * recursion and blowing the stack. Tail call optimization would
1482 * handle this, but compiling with frame pointers also disables
1483 * gcc's sibling call optimization.
1484 */
1485 if (bio->bi_end_io == bio_chain_endio) {
1486 bio = __bio_chain_endio(bio);
1487 goto again;
196d38bc 1488 }
ba8c6967 1489
9e234eea 1490 blk_throtl_bio_endio(bio);
b222dd2f
SL
1491 /* release cgroup info */
1492 bio_uninit(bio);
ba8c6967
CH
1493 if (bio->bi_end_io)
1494 bio->bi_end_io(bio);
1da177e4 1495}
a112a71d 1496EXPORT_SYMBOL(bio_endio);
1da177e4 1497
20d0189b
KO
1498/**
1499 * bio_split - split a bio
1500 * @bio: bio to split
1501 * @sectors: number of sectors to split from the front of @bio
1502 * @gfp: gfp mask
1503 * @bs: bio set to allocate from
1504 *
1505 * Allocates and returns a new bio which represents @sectors from the start of
1506 * @bio, and updates @bio to represent the remaining sectors.
1507 *
f3f5da62 1508 * Unless this is a discard request the newly allocated bio will point
dad77584
BVA
1509 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1510 * neither @bio nor @bs are freed before the split bio.
20d0189b
KO
1511 */
1512struct bio *bio_split(struct bio *bio, int sectors,
1513 gfp_t gfp, struct bio_set *bs)
1514{
f341a4d3 1515 struct bio *split;
20d0189b
KO
1516
1517 BUG_ON(sectors <= 0);
1518 BUG_ON(sectors >= bio_sectors(bio));
1519
0512a75b
KB
1520 /* Zone append commands cannot be split */
1521 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1522 return NULL;
1523
f9d03f96 1524 split = bio_clone_fast(bio, gfp, bs);
20d0189b
KO
1525 if (!split)
1526 return NULL;
1527
1528 split->bi_iter.bi_size = sectors << 9;
1529
1530 if (bio_integrity(split))
fbd08e76 1531 bio_integrity_trim(split);
20d0189b
KO
1532
1533 bio_advance(bio, split->bi_iter.bi_size);
1534
fbbaf700 1535 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
20d59023 1536 bio_set_flag(split, BIO_TRACE_COMPLETION);
fbbaf700 1537
20d0189b
KO
1538 return split;
1539}
1540EXPORT_SYMBOL(bio_split);
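/*
 * Illustrative sketch (guarded out) of the raid0-style split loop: carve
 * off the part that fits the current device, chain it to the remainder,
 * re-submit the remainder and keep working on the front part.  The helper
 * and @chunk_sectors limit are hypothetical.
 */
#if 0
static void my_submit(struct bio *bio, struct bio_set *bs,
		      unsigned int chunk_sectors)
{
	if (bio_sectors(bio) > chunk_sectors) {
		struct bio *split = bio_split(bio, chunk_sectors, GFP_NOIO, bs);

		bio_chain(split, bio);
		submit_bio_noacct(bio);	/* remainder is re-submitted */
		bio = split;		/* continue with the front part */
	}
	/* ... remap and submit @bio to the underlying device ... */
}
#endif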
1541
6678d83f
KO
1542/**
1543 * bio_trim - trim a bio
1544 * @bio: bio to trim
1545 * @offset: number of sectors to trim from the front of @bio
1546 * @size: size we want to trim @bio to, in sectors
e83502ca
CK
1547 *
1548 * This function is typically used for bios that are cloned and submitted
1549 * to the underlying device in parts.
6678d83f 1550 */
e83502ca 1551void bio_trim(struct bio *bio, sector_t offset, sector_t size)
6678d83f 1552{
e83502ca
CK
1553 if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
1554 offset + size > bio->bi_iter.bi_size))
1555 return;
6678d83f
KO
1556
1557 size <<= 9;
4f024f37 1558 if (offset == 0 && size == bio->bi_iter.bi_size)
6678d83f
KO
1559 return;
1560
6678d83f 1561 bio_advance(bio, offset << 9);
4f024f37 1562 bio->bi_iter.bi_size = size;
376a78ab
DM
1563
1564 if (bio_integrity(bio))
fbd08e76 1565 bio_integrity_trim(bio);
6678d83f
KO
1566}
1567EXPORT_SYMBOL_GPL(bio_trim);
1568
1da177e4
LT
1569/*
1570 * create memory pools for biovec's in a bio_set.
1571 * use the global biovec slabs created for general use.
1572 */
8aa6ba2f 1573int biovec_init_pool(mempool_t *pool, int pool_entries)
1da177e4 1574{
7a800a20 1575 struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1da177e4 1576
8aa6ba2f 1577 return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1da177e4
LT
1578}
1579
917a38c7
KO
1580/*
1581 * bioset_exit - exit a bioset initialized with bioset_init()
1582 *
1583 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1584 * kzalloc()).
1585 */
1586void bioset_exit(struct bio_set *bs)
1da177e4 1587{
be4d234d 1588 bio_alloc_cache_destroy(bs);
df2cb6da
KO
1589 if (bs->rescue_workqueue)
1590 destroy_workqueue(bs->rescue_workqueue);
917a38c7 1591 bs->rescue_workqueue = NULL;
df2cb6da 1592
8aa6ba2f
KO
1593 mempool_exit(&bs->bio_pool);
1594 mempool_exit(&bs->bvec_pool);
9f060e22 1595
7878cba9 1596 bioset_integrity_free(bs);
917a38c7
KO
1597 if (bs->bio_slab)
1598 bio_put_slab(bs);
1599 bs->bio_slab = NULL;
1600}
1601EXPORT_SYMBOL(bioset_exit);
1da177e4 1602
917a38c7
KO
1603/**
1604 * bioset_init - Initialize a bio_set
dad08527 1605 * @bs: pool to initialize
917a38c7
KO
1606 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1607 * @front_pad: Number of bytes to allocate in front of the returned bio
1608 * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS
1609 * and %BIOSET_NEED_RESCUER
1610 *
dad08527
KO
1611 * Description:
1612 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1613 * to ask for a number of bytes to be allocated in front of the bio.
1614 * Front pad allocation is useful for embedding the bio inside
1615 * another structure, to avoid allocating extra data to go with the bio.
1616 * Note that the bio must always be embedded at the END of that structure,
1617 * or things will break badly.
1618 * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1619 * for allocating iovecs. This pool is not needed e.g. for bio_clone_fast().
1620 * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1621 * dispatch queued requests when the mempool runs out of space.
1622 *
917a38c7
KO
1623 */
1624int bioset_init(struct bio_set *bs,
1625 unsigned int pool_size,
1626 unsigned int front_pad,
1627 int flags)
1628{
917a38c7 1629 bs->front_pad = front_pad;
9f180e31
ML
1630 if (flags & BIOSET_NEED_BVECS)
1631 bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1632 else
1633 bs->back_pad = 0;
917a38c7
KO
1634
1635 spin_lock_init(&bs->rescue_lock);
1636 bio_list_init(&bs->rescue_list);
1637 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1638
49d1ec85 1639 bs->bio_slab = bio_find_or_create_slab(bs);
917a38c7
KO
1640 if (!bs->bio_slab)
1641 return -ENOMEM;
1642
1643 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1644 goto bad;
1645
1646 if ((flags & BIOSET_NEED_BVECS) &&
1647 biovec_init_pool(&bs->bvec_pool, pool_size))
1648 goto bad;
1649
be4d234d
JA
1650 if (flags & BIOSET_NEED_RESCUER) {
1651 bs->rescue_workqueue = alloc_workqueue("bioset",
1652 WQ_MEM_RECLAIM, 0);
1653 if (!bs->rescue_workqueue)
1654 goto bad;
1655 }
1656 if (flags & BIOSET_PERCPU_CACHE) {
1657 bs->cache = alloc_percpu(struct bio_alloc_cache);
1658 if (!bs->cache)
1659 goto bad;
1660 cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1661 }
917a38c7
KO
1662
1663 return 0;
1664bad:
1665 bioset_exit(bs);
1666 return -ENOMEM;
1667}
1668EXPORT_SYMBOL(bioset_init);
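/*
 * Illustrative sketch (guarded out, hypothetical names): use front_pad to
 * embed the bio at the end of a driver-private structure, as the
 * description above suggests.  BIO_POOL_SIZE is only a reasonable default
 * pool size here.
 */
#if 0
struct my_io {
	void		*my_data;
	struct bio	bio;		/* must be the last member */
};

static struct bio_set my_bio_set;

static int __init my_driver_init(void)
{
	return bioset_init(&my_bio_set, BIO_POOL_SIZE,
			   offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
}

static inline struct my_io *my_io_from_bio(struct bio *bio)
{
	return container_of(bio, struct my_io, bio);
}
#endif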
1669
28e89fd9
JA
1670/*
1671 * Initialize and setup a new bio_set, based on the settings from
1672 * another bio_set.
1673 */
1674int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
1675{
1676 int flags;
1677
1678 flags = 0;
1679 if (src->bvec_pool.min_nr)
1680 flags |= BIOSET_NEED_BVECS;
1681 if (src->rescue_workqueue)
1682 flags |= BIOSET_NEED_RESCUER;
1683
1684 return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
1685}
1686EXPORT_SYMBOL(bioset_init_from_src);
1687
be4d234d
JA
1688/**
1689 * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb
1690 * @kiocb: kiocb describing the IO
0ef47db1 1691 * @nr_vecs: number of iovecs to pre-allocate
be4d234d
JA
1692 * @bs: bio_set to allocate from
1693 *
1694 * Description:
1695 * Like @bio_alloc_bioset, but pass in the kiocb. The kiocb is only
1696 * used to check if we should dip into the per-cpu bio_set allocation
3d5b3fbe
JA
1697 * cache. The allocation uses GFP_KERNEL internally. On return, the
1698 * bio is marked BIO_PERCPU_CACHEABLE, and the final put of the bio
1699 * MUST be done from process context, not hard/soft IRQ.
be4d234d
JA
1700 *
1701 */
1702struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
1703 struct bio_set *bs)
1704{
1705 struct bio_alloc_cache *cache;
1706 struct bio *bio;
1707
1708 if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
1709 return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
1710
1711 cache = per_cpu_ptr(bs->cache, get_cpu());
1712 bio = bio_list_pop(&cache->free_list);
1713 if (bio) {
1714 cache->nr--;
1715 put_cpu();
1716 bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
1717 bio->bi_pool = bs;
1718 bio_set_flag(bio, BIO_PERCPU_CACHE);
1719 return bio;
1720 }
1721 put_cpu();
1722 bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
1723 bio_set_flag(bio, BIO_PERCPU_CACHE);
1724 return bio;
1725}
1726EXPORT_SYMBOL_GPL(bio_alloc_kiocb);
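/*
 * Illustrative sketch (guarded out, hypothetical helper): allocate the bio
 * for a kiocb-driven read so the per-cpu cache is used when the issuer
 * (e.g. io_uring) set IOCB_ALLOC_CACHE.  The end_io handler is only
 * declared here; for cache-backed bios it must issue the final bio_put()
 * from process context.
 */
#if 0
static void my_read_end_io(struct bio *bio);

static void my_kiocb_read(struct kiocb *iocb, struct block_device *bdev,
			  struct bio_set *bs, struct page *page, loff_t pos)
{
	struct bio *bio = bio_alloc_kiocb(iocb, 1, bs);

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = my_read_end_io;
	submit_bio(bio);
}
#endif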
1727
de76fd89 1728static int __init init_bio(void)
1da177e4
LT
1729{
1730 int i;
1731
7878cba9 1732 bio_integrity_init();
1da177e4 1733
de76fd89
CH
1734 for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1735 struct biovec_slab *bvs = bvec_slabs + i;
a7fcd37c 1736
de76fd89
CH
1737 bvs->slab = kmem_cache_create(bvs->name,
1738 bvs->nr_vecs * sizeof(struct bio_vec), 0,
1739 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1da177e4 1740 }
1da177e4 1741
be4d234d
JA
1742 cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1743 bio_cpu_dead);
1744
f4f8154a 1745 if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
1da177e4
LT
1746 panic("bio: can't allocate bios\n");
1747
f4f8154a 1748 if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
a91a2785
MP
1749 panic("bio: can't create integrity pool\n");
1750
1da177e4
LT
1751 return 0;
1752}
1da177e4 1753subsys_initcall(init_bio);