// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[--idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

void bio_uninit(struct bio *bio)
{
	bio_disassociate_blkg(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
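/*
 * Example (illustrative sketch only, not used by this file): a driver that
 * embeds a bio plus one inline bio_vec in its own, hypothetical structure
 * would pair bio_init() with bio_uninit() roughly like this:
 *
 *	struct my_cmd {				// hypothetical caller struct
 *		struct bio	bio;
 *		struct bio_vec	inline_vec;
 *	};
 *
 *	bio_init(&cmd->bio, &cmd->inline_vec, 1);
 *	...					// set up and submit the bio
 *	bio_uninit(&cmd->bio);			// in completion path, before freeing cmd
 */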

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	bio_uninit(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
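/*
 * Example (sketch of a typical splitting pattern; error handling omitted):
 * carve the front off a bio, chain the remainder to the front part, and
 * resubmit the remainder so the original completion fires only once both
 * halves have finished:
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *	bio_chain(split, bio);		// bio's end_io waits for both
 *	generic_make_request(bio);	// requeue the remainder
 *	bio = split;			// keep processing the front part
 */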

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) &&
				 nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list &&
		    (!bio_list_empty(&current->bio_list[0]) ||
		     !bio_list_empty(&current->bio_list[1])) &&
		    bs->rescue_workqueue)
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(&bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(&bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, &bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
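/*
 * Example (sketch, not part of this file): a caller that does not need a
 * private pool can allocate from fs_bio_set and rely on the mempool
 * guarantee described above, provided it submits each bio before allocating
 * the next one:
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &fs_bio_set);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_READ;
 *	...				// add pages, set bi_end_io
 *	submit_bio(bio);
 */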

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_disk with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_disk = bio_src->bi_disk;
	bio->bi_partno = bio_src->bi_partno;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
		bv->bv_offset + bv->bv_len - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
		return false;
	return true;
}

static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * __bio_add_pc_page - attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @same_page: return if the merge happened inside the same page
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		bool *same_page)
{
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio. This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(__bio_try_merge_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
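/*
 * Example (sketch): filling a freshly allocated bio one page at a time. The
 * return value must be checked, since a full or cloned bio rejects the page:
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
 *			// bio is full: submit it and continue with a new one
 *			break;
 *		}
 *	}
 */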

void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	if (bio_flagged(bio, BIO_NO_PAGE_REF))
		return;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}

static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
{
	const struct bio_vec *bv = iter->bvec;
	unsigned int len;
	size_t size;

	if (WARN_ON_ONCE(iter->iov_offset > bv->bv_len))
		return -EINVAL;

	len = min_t(size_t, bv->bv_len - iter->iov_offset, iter->count);
	size = bio_add_page(bio, bv->bv_page, len,
				bv->bv_offset + iter->iov_offset);
	if (unlikely(size != len))
		return -EINVAL;
	iov_iter_advance(iter, size);
	return 0;
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	bool same_page = false;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);

		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
			if (same_page)
				put_page(page);
		} else {
			if (WARN_ON_ONCE(bio_full(bio, len)))
				return -EINVAL;
			__bio_add_page(bio, page, len, offset);
		}
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. If we're adding kernel pages, and the caller told us it's safe to
 * do so, we just have to add the pages to the bio directly. We don't grab an
 * extra reference to those pages (the user should already have that), and we
 * don't put the page on IO completion. The caller needs to check if the bio is
 * flagged BIO_NO_PAGE_REF on IO completion. If it isn't, then pages should be
 * released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in *iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	const bool is_bvec = iov_iter_is_bvec(iter);
	int ret;

	if (WARN_ON_ONCE(bio->bi_vcnt))
		return -EINVAL;

	do {
		if (is_bvec)
			ret = __bio_iov_bvec_add_pages(bio, iter);
		else
			ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	if (is_bvec)
		bio_set_flag(bio, BIO_NO_PAGE_REF);
	return bio->bi_vcnt ? 0 : ret;
}

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * result in the bio reference being consumed. The caller must drop the
 * reference on his own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done, bio->bi_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
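/*
 * Example (sketch): synchronously writing a single page, as a simple caller
 * might do in a slow path; note the caller still owns the bio afterwards and
 * must put it:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio_set_dev(bio, bdev);
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_opf = REQ_OP_WRITE;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);			// reference is not consumed, see above
 */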

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	while (src_iter->bi_size && dst_iter->bi_size) {
		src_bv = bio_iter_iovec(src, *src_iter);
		dst_bv = bio_iter_iovec(dst, *dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		flush_dcache_page(dst_bv.bv_page);

		bio_advance_iter(src, src_iter, bytes);
		bio_advance_iter(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);

/**
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 *			another
 * @src: source bio list
 * @dst: destination bio list
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
void bio_list_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
	}
}
EXPORT_SYMBOL(bio_list_copy_data);

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

/**
 * bio_copy_user_iov - copy user data to bio
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;

	nr_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (nr_pages > BIO_MAX_PAGES)
		nr_pages = BIO_MAX_PAGES;

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;
	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}

/**
 * bio_map_user_iov - map user iovec into bio
 * @q:		the struct request_queue for the bio
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	struct bio *bio;
	int ret;

	if (!iov_iter_count(iter))
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(q))) {
			ret = -EINVAL;
			j = 0;
		} else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!__bio_add_pc_page(q, bio, page, n, offs,
						&same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
			iov_iter_advance(iter, added);
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes)
			break;
	}

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

 out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	return ERR_PTR(ret);
}

/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	bio_release_pages(bio, bio_data_dir(bio) == READ);
	bio_put(bio);
	bio_put(bio);
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages. The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all. So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	struct bio *bio, *next;

	spin_lock_irq(&bio_dirty_lock);
	next = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irq(&bio_dirty_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_private;

		bio_release_pages(bio, true);
		bio_put(bio);
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	unsigned long flags;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page))
			goto defer;
	}

	bio_release_pages(bio, false);
	bio_put(bio);
	return;
defer:
	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio->bi_private = bio_dirty_list;
	bio_dirty_list = bio;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);
	schedule_work(&bio_dirty_work);
}

void update_io_ticks(struct hd_struct *part, unsigned long now)
{
	unsigned long stamp;
again:
	stamp = READ_ONCE(part->stamp);
	if (unlikely(stamp != now)) {
		if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
			__part_stat_add(part, io_ticks, 1);
		}
	}
	if (part->partno) {
		part = &part_to_disk(part)->part0;
		goto again;
	}
}

void generic_start_io_acct(struct request_queue *q, int op,
			   unsigned long sectors, struct hd_struct *part)
{
	const int sgrp = op_stat_group(op);

	part_stat_lock();

	update_io_ticks(part, jiffies);
	part_stat_inc(part, ios[sgrp]);
	part_stat_add(part, sectors[sgrp], sectors);
	part_inc_in_flight(q, part, op_is_write(op));

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(struct request_queue *q, int req_op,
			 struct hd_struct *part, unsigned long start_time)
{
	unsigned long now = jiffies;
	unsigned long duration = now - start_time;
	const int sgrp = op_stat_group(req_op);

	part_stat_lock();

	update_io_ticks(part, now);
	part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
	part_stat_add(part, time_in_queue, duration);
	part_dec_in_flight(q, part, op_is_write(req_op));

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);
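/*
 * Example (sketch): a bio-based driver that bypasses the request layer
 * typically brackets each bio with the two helpers above:
 *
 *	start = jiffies;
 *	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &disk->part0);
 *	...				// perform the I/O
 *	generic_end_io_acct(q, bio_op(bio), &disk->part0, start);
 */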

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 *
 *   bio_endio() can be called several times on a bio that has been chained
 *   using bio_chain(). The ->bi_end_io() function will only be called the
 *   last time. At this point the BLK_TA_COMPLETE tracing event will be
 *   generated if BIO_TRACE_COMPLETION is set.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;
	if (!bio_integrity_endio(bio))
		return;

	if (bio->bi_disk)
		rq_qos_done_bio(bio->bi_disk->queue, bio);

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_complete(bio->bi_disk->queue, bio,
					 blk_status_to_errno(bio->bi_status));
		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
	}

	blk_throtl_bio_endio(bio);
	/* release cgroup info */
	bio_uninit(bio);
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);
1da177e4 1821
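/*
 * Illustrative sketch (not part of bio.c): a simple make_request style driver
 * completing a bio.  ramdisk_do_io() and ramdisk_make_request() are made up;
 * setting ->bi_status and then calling bio_endio() exactly once is the real
 * completion protocol described above.
 */
static bool ramdisk_do_io(struct bio *bio)
{
	/* hypothetical: move data to/from the backing store */
	return true;
}

static blk_qc_t ramdisk_make_request(struct request_queue *q, struct bio *bio)
{
	if (!ramdisk_do_io(bio))
		bio->bi_status = BLK_STS_IOERR;	/* report failure to the owner */

	bio_endio(bio);		/* runs ->bi_end_io, or completes the chain */
	return BLK_QC_T_NONE;
}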
20d0189b
KO
1822/**
1823 * bio_split - split a bio
1824 * @bio: bio to split
1825 * @sectors: number of sectors to split from the front of @bio
1826 * @gfp: gfp mask
1827 * @bs: bio set to allocate from
1828 *
1829 * Allocates and returns a new bio which represents @sectors from the start of
1830 * @bio, and updates @bio to represent the remaining sectors.
1831 *
f3f5da62 1832 * Unless this is a discard request, the newly allocated bio will point
dad77584
BVA
1833 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1834 * neither @bio nor @bs are freed before the split bio.
20d0189b
KO
1835 */
1836struct bio *bio_split(struct bio *bio, int sectors,
1837 gfp_t gfp, struct bio_set *bs)
1838{
f341a4d3 1839 struct bio *split;
20d0189b
KO
1840
1841 BUG_ON(sectors <= 0);
1842 BUG_ON(sectors >= bio_sectors(bio));
1843
f9d03f96 1844 split = bio_clone_fast(bio, gfp, bs);
20d0189b
KO
1845 if (!split)
1846 return NULL;
1847
1848 split->bi_iter.bi_size = sectors << 9;
1849
1850 if (bio_integrity(split))
fbd08e76 1851 bio_integrity_trim(split);
20d0189b
KO
1852
1853 bio_advance(bio, split->bi_iter.bi_size);
1854
fbbaf700 1855 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
20d59023 1856 bio_set_flag(split, BIO_TRACE_COMPLETION);
fbbaf700 1857
20d0189b
KO
1858 return split;
1859}
1860EXPORT_SYMBOL(bio_split);
1861
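/*
 * Illustrative sketch (not part of bio.c): bounding a bio to a maximum size,
 * the way the block core and stacking drivers use bio_split().  my_max_sectors
 * and my_bs are hypothetical; bio_split(), bio_chain() and
 * generic_make_request() are the real interfaces.
 */
static void submit_bounded(struct bio *bio, unsigned int my_max_sectors,
			   struct bio_set *my_bs)
{
	while (bio_sectors(bio) > my_max_sectors) {
		struct bio *split = bio_split(bio, my_max_sectors, GFP_NOIO,
					      my_bs);

		if (!split)
			break;		/* fall back to submitting as-is */

		/* the front piece completes into the trimmed-down remainder */
		bio_chain(split, bio);
		generic_make_request(split);
	}
	generic_make_request(bio);
}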
6678d83f
KO
1862/**
1863 * bio_trim - trim a bio
1864 * @bio: bio to trim
1865 * @offset: number of sectors to trim from the front of @bio
1866 * @size: size we want to trim @bio to, in sectors
1867 */
1868void bio_trim(struct bio *bio, int offset, int size)
1869{
1870 /* 'bio' is a cloned bio which we need to trim to match
1871 * the given offset and size.
6678d83f 1872 */
6678d83f
KO
1873
1874 size <<= 9;
4f024f37 1875 if (offset == 0 && size == bio->bi_iter.bi_size)
6678d83f
KO
1876 return;
1877
6678d83f 1878 bio_advance(bio, offset << 9);
4f024f37 1879 bio->bi_iter.bi_size = size;
376a78ab
DM
1880
1881 if (bio_integrity(bio))
fbd08e76 1882 bio_integrity_trim(bio);
376a78ab 1883
6678d83f
KO
1884}
1885EXPORT_SYMBOL_GPL(bio_trim);
1886
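/*
 * Illustrative sketch (not part of bio.c): cloning a bio and trimming the
 * clone to a sub-range, as stacking drivers do when remapping only part of an
 * I/O.  clone_subrange() and its arguments are hypothetical; bio_clone_fast()
 * and bio_trim() are the real interfaces.
 */
static struct bio *clone_subrange(struct bio *bio, int offset_sectors,
				  int len_sectors, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (!clone)
		return NULL;

	/* keep only len_sectors, starting offset_sectors into the clone */
	bio_trim(clone, offset_sectors, len_sectors);
	return clone;
}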
1da177e4
LT
1887/*
 1888 * create memory pools for biovecs in a bio_set.
1889 * use the global biovec slabs created for general use.
1890 */
8aa6ba2f 1891int biovec_init_pool(mempool_t *pool, int pool_entries)
1da177e4 1892{
ed996a52 1893 struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1da177e4 1894
8aa6ba2f 1895 return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1da177e4
LT
1896}
1897
917a38c7
KO
1898/*
1899 * bioset_exit - exit a bioset initialized with bioset_init()
1900 *
1901 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1902 * kzalloc()).
1903 */
1904void bioset_exit(struct bio_set *bs)
1da177e4 1905{
df2cb6da
KO
1906 if (bs->rescue_workqueue)
1907 destroy_workqueue(bs->rescue_workqueue);
917a38c7 1908 bs->rescue_workqueue = NULL;
df2cb6da 1909
8aa6ba2f
KO
1910 mempool_exit(&bs->bio_pool);
1911 mempool_exit(&bs->bvec_pool);
9f060e22 1912
7878cba9 1913 bioset_integrity_free(bs);
917a38c7
KO
1914 if (bs->bio_slab)
1915 bio_put_slab(bs);
1916 bs->bio_slab = NULL;
1917}
1918EXPORT_SYMBOL(bioset_exit);
1da177e4 1919
917a38c7
KO
1920/**
1921 * bioset_init - Initialize a bio_set
dad08527 1922 * @bs: pool to initialize
917a38c7
KO
1923 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1924 * @front_pad: Number of bytes to allocate in front of the returned bio
1925 * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS
1926 * and %BIOSET_NEED_RESCUER
1927 *
dad08527
KO
1928 * Description:
1929 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1930 * to ask for a number of bytes to be allocated in front of the bio.
1931 * Front pad allocation is useful for embedding the bio inside
1932 * another structure, to avoid allocating extra data to go with the bio.
 1933 * Note that the bio must always be embedded at the END of that structure,
1934 * or things will break badly.
1935 * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1936 * for allocating iovecs. This pool is not needed e.g. for bio_clone_fast().
1937 * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to
1938 * dispatch queued requests when the mempool runs out of space.
1939 *
917a38c7
KO
1940 */
1941int bioset_init(struct bio_set *bs,
1942 unsigned int pool_size,
1943 unsigned int front_pad,
1944 int flags)
1945{
1946 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1947
1948 bs->front_pad = front_pad;
1949
1950 spin_lock_init(&bs->rescue_lock);
1951 bio_list_init(&bs->rescue_list);
1952 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1953
1954 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1955 if (!bs->bio_slab)
1956 return -ENOMEM;
1957
1958 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1959 goto bad;
1960
1961 if ((flags & BIOSET_NEED_BVECS) &&
1962 biovec_init_pool(&bs->bvec_pool, pool_size))
1963 goto bad;
1964
1965 if (!(flags & BIOSET_NEED_RESCUER))
1966 return 0;
1967
1968 bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1969 if (!bs->rescue_workqueue)
1970 goto bad;
1971
1972 return 0;
1973bad:
1974 bioset_exit(bs);
1975 return -ENOMEM;
1976}
1977EXPORT_SYMBOL(bioset_init);
1978
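/*
 * Illustrative sketch (not part of bio.c): using front_pad to embed a bio at
 * the end of a per-I/O structure, as the description above suggests.  struct
 * my_io, my_bio_set and the helpers are hypothetical; bioset_init(),
 * bioset_exit(), bio_alloc_bioset() and container_of() are the real machinery.
 */
struct my_io {
	void		*private_data;	/* driver-specific state */
	struct bio	bio;		/* must be the last member */
};

static struct bio_set my_bio_set;

static int my_pool_init(void)
{
	/* reserve room for struct my_io in front of every bio we allocate */
	return bioset_init(&my_bio_set, BIO_POOL_SIZE,
			   offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
}

static void my_pool_exit(void)
{
	bioset_exit(&my_bio_set);
}

static struct my_io *my_io_alloc(unsigned int nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);

	return bio ? container_of(bio, struct my_io, bio) : NULL;
}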
28e89fd9
JA
1979/*
1980 * Initialize and setup a new bio_set, based on the settings from
1981 * another bio_set.
1982 */
1983int bioset_init_from_src(struct bio_set *bs, struct bio_set *src)
1984{
1985 int flags;
1986
1987 flags = 0;
1988 if (src->bvec_pool.min_nr)
1989 flags |= BIOSET_NEED_BVECS;
1990 if (src->rescue_workqueue)
1991 flags |= BIOSET_NEED_RESCUER;
1992
1993 return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags);
1994}
1995EXPORT_SYMBOL(bioset_init_from_src);
1996
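/*
 * Illustrative sketch (not part of bio.c): giving a driver a private pool
 * shaped like an existing one, here fs_bio_set.  my_mirror_bio_set and the
 * setup helper are hypothetical; bioset_init_from_src() is the real interface.
 */
static struct bio_set my_mirror_bio_set;

static int my_mirror_pool_setup(void)
{
	/* copies the pool size, front_pad and NEED_BVECS/NEED_RESCUER flags */
	return bioset_init_from_src(&my_mirror_bio_set, &fs_bio_set);
}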
852c788f 1997#ifdef CONFIG_BLK_CGROUP
1d933cf0 1998
74b7c02a 1999/**
2268c0fe 2000 * bio_disassociate_blkg - puts back the blkg reference if associated
74b7c02a 2001 * @bio: target bio
74b7c02a 2002 *
2268c0fe 2003 * Helper to disassociate the blkg from @bio if a blkg is associated.
74b7c02a 2004 */
2268c0fe 2005void bio_disassociate_blkg(struct bio *bio)
74b7c02a 2006{
2268c0fe
DZ
2007 if (bio->bi_blkg) {
2008 blkg_put(bio->bi_blkg);
2009 bio->bi_blkg = NULL;
2010 }
74b7c02a 2011}
892ad71f 2012EXPORT_SYMBOL_GPL(bio_disassociate_blkg);
74b7c02a 2013
a7b39b4e 2014/**
2268c0fe 2015 * __bio_associate_blkg - associate a bio with a blkg
a7b39b4e 2016 * @bio: target bio
b5f2954d 2017 * @blkg: the blkg to associate
b5f2954d 2018 *
beea9da0
DZ
2019 * This tries to associate @bio with the specified @blkg. Association failure
2020 * is handled by walking up the blkg tree. Therefore, the blkg associated can
2021 * be anything between @blkg and the root_blkg. This situation only happens
 2022 * when a cgroup is dying, in which case the remaining bios spill to the
 2023 * closest alive blkg.
a7b39b4e 2024 *
beea9da0
DZ
2025 * A reference will be taken on the @blkg and will be released when @bio is
2026 * freed.
a7b39b4e 2027 */
2268c0fe 2028static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
a7b39b4e 2029{
2268c0fe
DZ
2030 bio_disassociate_blkg(bio);
2031
7754f669 2032 bio->bi_blkg = blkg_tryget_closest(blkg);
a7b39b4e
DZF
2033}
2034
d459d853 2035/**
fd42df30 2036 * bio_associate_blkg_from_css - associate a bio with a specified css
d459d853 2037 * @bio: target bio
fd42df30 2038 * @css: target css
d459d853 2039 *
fd42df30 2040 * Associate @bio with the blkg found by combining the css's blkg and the
fc5a828b
DZ
2041 * request_queue of the @bio. This falls back to the queue's root_blkg if
2042 * the association fails with the css.
d459d853 2043 */
fd42df30
DZ
2044void bio_associate_blkg_from_css(struct bio *bio,
2045 struct cgroup_subsys_state *css)
d459d853 2046{
fc5a828b
DZ
2047 struct request_queue *q = bio->bi_disk->queue;
2048 struct blkcg_gq *blkg;
2049
2050 rcu_read_lock();
2051
2052 if (!css || !css->parent)
2053 blkg = q->root_blkg;
2054 else
2055 blkg = blkg_lookup_create(css_to_blkcg(css), q);
2056
2057 __bio_associate_blkg(bio, blkg);
2058
2059 rcu_read_unlock();
d459d853 2060}
fd42df30 2061EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
d459d853 2062
6a7f6d86 2063#ifdef CONFIG_MEMCG
852c788f 2064/**
6a7f6d86 2065 * bio_associate_blkg_from_page - associate a bio with the page's blkg
852c788f 2066 * @bio: target bio
6a7f6d86
DZ
2067 * @page: the page to lookup the blkcg from
2068 *
2069 * Associate @bio with the blkg from @page's owning memcg and the respective
fc5a828b
DZ
 2070 * request_queue. If cgroup_e_css() returns %NULL, the association falls
 2071 * back to the queue's root_blkg.
852c788f 2072 */
6a7f6d86 2073void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
852c788f 2074{
6a7f6d86
DZ
2075 struct cgroup_subsys_state *css;
2076
6a7f6d86
DZ
2077 if (!page->mem_cgroup)
2078 return;
2079
fc5a828b
DZ
2080 rcu_read_lock();
2081
2082 css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
2083 bio_associate_blkg_from_css(bio, css);
2084
2085 rcu_read_unlock();
6a7f6d86
DZ
2086}
2087#endif /* CONFIG_MEMCG */
2088
2268c0fe
DZ
2089/**
2090 * bio_associate_blkg - associate a bio with a blkg
2091 * @bio: target bio
2092 *
2093 * Associate @bio with the blkg found from the bio's css and request_queue.
 2094 * If one is not found, blkg_lookup_create() creates the blkg. If a blkg is
2095 * already associated, the css is reused and association redone as the
2096 * request_queue may have changed.
2097 */
2098void bio_associate_blkg(struct bio *bio)
2099{
fc5a828b 2100 struct cgroup_subsys_state *css;
2268c0fe
DZ
2101
2102 rcu_read_lock();
2103
db6638d7 2104 if (bio->bi_blkg)
fc5a828b 2105 css = &bio_blkcg(bio)->css;
db6638d7 2106 else
fc5a828b 2107 css = blkcg_css();
2268c0fe 2108
fc5a828b 2109 bio_associate_blkg_from_css(bio, css);
2268c0fe
DZ
2110
2111 rcu_read_unlock();
852c788f 2112}
5cdf2e3f 2113EXPORT_SYMBOL_GPL(bio_associate_blkg);
852c788f 2114
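/*
 * Illustrative sketch (not part of bio.c): making sure a bio carries a blkg
 * before it is sent down the stack, falling back to the submitter's cgroup
 * when nothing has associated one yet.  my_issue_check() is hypothetical;
 * bio_associate_blkg() is the real interface.
 */
static void my_issue_check(struct bio *bio)
{
	/*
	 * Writeback or swap paths may already have attached a css/blkg;
	 * otherwise charge the I/O to the current context's cgroup.
	 */
	if (!bio->bi_blkg)
		bio_associate_blkg(bio);
}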
20bd723e 2115/**
db6638d7 2116 * bio_clone_blkg_association - clone blkg association from src to dst bio
20bd723e
PV
2117 * @dst: destination bio
2118 * @src: source bio
2119 */
db6638d7 2120void bio_clone_blkg_association(struct bio *dst, struct bio *src)
20bd723e 2121{
6ab21879
DZ
2122 rcu_read_lock();
2123
fc5a828b 2124 if (src->bi_blkg)
2268c0fe 2125 __bio_associate_blkg(dst, src->bi_blkg);
6ab21879
DZ
2126
2127 rcu_read_unlock();
20bd723e 2128}
db6638d7 2129EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
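/*
 * Illustrative sketch (not part of bio.c): a helper bio built from scratch
 * (not cloned) on behalf of @orig, e.g. by a worker thread, that should be
 * charged to the original submitter's cgroup rather than the worker's.
 * my_build_helper_bio() is hypothetical; bio_clone_blkg_association() is the
 * real interface.
 */
static struct bio *my_build_helper_bio(struct bio *orig, unsigned int nr_vecs)
{
	struct bio *bio = bio_alloc(GFP_NOIO, nr_vecs);

	/* device, sector and payload setup omitted from this sketch */
	bio->bi_opf = orig->bi_opf;

	/* carry @orig's cgroup association over to the new bio */
	bio_clone_blkg_association(bio, orig);
	return bio;
}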
852c788f
TH
2130#endif /* CONFIG_BLK_CGROUP */
2131
1da177e4
LT
2132static void __init biovec_init_slabs(void)
2133{
2134 int i;
2135
ed996a52 2136 for (i = 0; i < BVEC_POOL_NR; i++) {
1da177e4
LT
2137 int size;
2138 struct biovec_slab *bvs = bvec_slabs + i;
2139
a7fcd37c
JA
2140 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2141 bvs->slab = NULL;
2142 continue;
2143 }
a7fcd37c 2144
1da177e4
LT
2145 size = bvs->nr_vecs * sizeof(struct bio_vec);
2146 bvs->slab = kmem_cache_create(bvs->name, size, 0,
20c2df83 2147 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4
LT
2148 }
2149}
2150
2151static int __init init_bio(void)
2152{
bb799ca0
JA
2153 bio_slab_max = 2;
2154 bio_slab_nr = 0;
6396bb22
KC
2155 bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
2156 GFP_KERNEL);
2b24e6f6
JT
2157
2158 BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
2159
bb799ca0
JA
2160 if (!bio_slabs)
2161 panic("bio: can't allocate bios\n");
1da177e4 2162
7878cba9 2163 bio_integrity_init();
1da177e4
LT
2164 biovec_init_slabs();
2165
f4f8154a 2166 if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
1da177e4
LT
2167 panic("bio: can't allocate bios\n");
2168
f4f8154a 2169 if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
a91a2785
MP
2170 panic("bio: can't create integrity pool\n");
2171
1da177e4
LT
2172 return 0;
2173}
1da177e4 2174subsys_initcall(init_bio);