// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/blk-cgroup.h>
#include <linux/highmem.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

struct bio_alloc_cache {
	struct bio	*free_list;
	unsigned int	nr;
};

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);
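/*
 * Example (illustrative sketch): I/O code without its own bio_set normally
 * reaches fs_bio_set through the bio_alloc() wrapper rather than naming it
 * directly, e.g. for a single-vec read:
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
 */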
/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN,
			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations. If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	bio_uninit(bio);

	if (bs) {
		bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, unsigned int opf)
{
	bio->bi_next = NULL;
	bio->bi_bdev = bdev;
	bio->bi_opf = opf;
	bio->bi_flags = 0;
	bio->bi_ioprio = 0;
	bio->bi_write_hint = 0;
	bio->bi_status = 0;
	bio->bi_iter.bi_sector = 0;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_idx = 0;
	bio->bi_iter.bi_bvec_done = 0;
	bio->bi_end_io = NULL;
	bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
	bio->bi_blkg = NULL;
	bio->bi_issue.value = 0;
	if (bdev)
		bio_associate_blkg(bio);
#ifdef CONFIG_BLK_CGROUP_IOCOST
	bio->bi_iocost_cost = 0;
#endif
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	bio->bi_crypt_context = NULL;
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	bio->bi_integrity = NULL;
#endif
	bio->bi_vcnt = 0;

	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
	bio->bi_cookie = BLK_QC_T_NONE;

	bio->bi_max_vecs = max_vecs;
	bio->bi_io_vec = table;
	bio->bi_pool = NULL;
}
EXPORT_SYMBOL(bio_init);
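/*
 * Example (sketch): a caller with an on-stack bio pairs bio_init() with
 * bio_uninit() once the I/O is done; "bdev" and "page" are assumed to be
 * provided by the caller:
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *
 *	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
 *	__bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */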
/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 * @bdev:	block device to use the bio for
 * @opf:	operation and flags for bio
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
	bio->bi_bdev = bdev;
	bio->bi_opf = opf;
}
EXPORT_SYMBOL(bio_reset);
static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
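/*
 * Example (sketch): a stacking driver that splits a bio chains the split
 * part to the original so that the parent completes last; "bs" is assumed
 * to be the driver's own bio_set:
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *	bio_chain(split, bio);
 *	submit_bio_noacct(split);
 *
 * The parent's ->bi_end_io then runs only once "split" has completed too.
 */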
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, unsigned int opf, gfp_t gfp)
{
	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @bdev:	block device to allocate the bio for (can be %NULL)
 * @nr_vecs:	number of bvecs to pre-allocate
 * @opf:	operation and flags for bio
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio. This is due to the mempool guarantees. To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have
 * deadlock avoidance code that resubmits any blocked bios from a rescuer
 * thread.
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     unsigned int opf, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_vecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
		return NULL;

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;

	bio = p + bs->front_pad;
	if (nr_vecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bdev, bvl, nr_vecs, opf);
	} else if (nr_vecs) {
		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
	} else {
		bio_init(bio, bdev, NULL, 0, opf);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
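/*
 * Example (sketch): a driver that wants per-bio private state can use
 * front_pad so that one mempool allocation covers both; "struct my_io"
 * and "my_bioset" are hypothetical names, with the struct bio as the
 * final member of struct my_io:
 *
 *	bioset_init(&my_bioset, BIO_POOL_SIZE, offsetof(struct my_io, bio),
 *		    BIOSET_NEED_BVECS);
 *
 *	bio = bio_alloc_bioset(bdev, 4, REQ_OP_WRITE, GFP_NOIO, &my_bioset);
 *	io = container_of(bio, struct my_io, bio);
 */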
/**
 * bio_kmalloc - kmalloc a bio for I/O
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 *
 * Use kmalloc to allocate and initialize a bio.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
{
	struct bio *bio;

	if (nr_iovecs > UIO_MAXIOV)
		return NULL;

	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
	if (unlikely(!bio))
		return NULL;
	bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs,
		 0);
	bio->bi_pool = NULL;
	return bio;
}
EXPORT_SYMBOL(bio_kmalloc);
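/*
 * Example (sketch): a bio_kmalloc() bio has no bio_set, so the final
 * bio_put() releases it with kfree() via bio_free(); "npages" is a
 * hypothetical caller-chosen count:
 *
 *	bio = bio_kmalloc(GFP_KERNEL, npages);
 *	if (!bio)
 *		return -ENOMEM;
 *	...
 *	bio_put(bio);
 */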
void zero_fill_bio(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter)
		memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio);
/**
 * bio_truncate - truncate the bio to the new size of @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size of @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
static void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, bv.bv_offset + offset,
				  bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

exit:
	/*
	 * Don't touch the bvec table here; keep it really immutable, since
	 * the fs bio user has to retrieve all pages via
	 * bio_for_each_segment_all in its ->bi_end_io() callback.
	 *
	 * It is enough to truncate the bio by updating .bi_size since we can
	 * make correct bvecs with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}
/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually. Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}
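/*
 * Worked example for guard_bio_eod(): on an 8-sector device, a 4KiB read
 * starting at sector 7 leaves maxsector = 8 - 7 = 1 while the bio spans
 * 4096 >> 9 = 8 sectors, so the bio is truncated to 1 << 9 = 512 bytes and
 * bio_truncate() zeroes the remaining 3584 bytes of the read.
 */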
#define ALLOC_CACHE_MAX		512
#define ALLOC_CACHE_SLACK	64

static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				  unsigned int nr)
{
	unsigned int i = 0;
	struct bio *bio;

	while ((bio = cache->free_list) != NULL) {
		cache->free_list = bio->bi_next;
		cache->nr--;
		bio_free(bio);
		if (++i == nr)
			break;
	}
}

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct bio_set *bs;

	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
	if (bs->cache) {
		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

		bio_alloc_cache_prune(cache, -1U);
	}
	return 0;
}

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
	int cpu;

	if (!bs->cache)
		return;

	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	for_each_possible_cpu(cpu) {
		struct bio_alloc_cache *cache;

		cache = per_cpu_ptr(bs->cache, cpu);
		bio_alloc_cache_prune(cache, -1U);
	}
	free_percpu(bs->cache);
}

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
		BUG_ON(!atomic_read(&bio->__bi_cnt));
		if (!atomic_dec_and_test(&bio->__bi_cnt))
			return;
	}

	if (bio_flagged(bio, BIO_PERCPU_CACHE)) {
		struct bio_alloc_cache *cache;

		bio_uninit(bio);
		cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
		bio->bi_next = cache->free_list;
		cache->free_list = bio;
		if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
			bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
		put_cpu();
	} else {
		bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs);

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkg_association(bio, bio_src);
	blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(NULL, 0, 0, gfp_mask, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_crypt_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	if (bio_integrity(bio) &&
	    bio_integrity_clone(b, bio, gfp_mask) < 0)
		goto err_put;

	return b;

err_put:
	bio_put(b);
	return NULL;
}
EXPORT_SYMBOL(bio_clone_fast);
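/*
 * Example (sketch): stacking drivers such as device-mapper clone the
 * incoming bio and then redirect the clone; "clone_bs", "target_bdev" and
 * "my_clone_endio" are hypothetical driver-owned names:
 *
 *	clone = bio_clone_fast(bio, GFP_NOIO, clone_bs);
 *	bio_set_dev(clone, target_bdev);
 *	clone->bi_end_io = my_clone_endio;
 *	submit_bio_noacct(clone);
 */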
const char *bio_devname(struct bio *bio, char *buf)
{
	return bdevname(bio->bi_bdev, buf);
}
EXPORT_SYMBOL(bio_devname);

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

static inline bool page_is_mergeable(const struct bio_vec *bv,
		struct page *page, unsigned int len, unsigned int off,
		bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (*same_page)
		return true;
	return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}

/**
 * __bio_try_merge_page - try appending data to an existing bvec.
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add
 * @off: offset of the data relative to @page
 * @same_page: return if the segment has been merged inside the same page
 *
 * Try to add the data at @page + @off to the last bvec of @bio. This is a
 * useful optimisation for file systems with a block size smaller than the
 * page size.
 *
 * Warn if (@len, @off) crosses pages in case that @same_page is true.
 *
 * Return %true on success or %false on failure.
 */
static bool __bio_try_merge_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return false;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page_is_mergeable(bv, page, len, off, same_page)) {
			if (bio->bi_iter.bi_size > UINT_MAX - len) {
				*same_page = false;
				return false;
			}
			bv->bv_len += len;
			bio->bi_iter.bi_size += len;
			return true;
		}
	}
	return false;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit. This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio,
				 struct page *page, unsigned len,
				 unsigned offset, bool *same_page)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (bv->bv_len + len > queue_max_segment_size(q))
		return false;
	return __bio_try_merge_page(bio, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	struct bio_vec *bvec;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	if (bio->bi_vcnt > 0) {
		if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page))
			return len;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
		if (bvec_gap_to_prev(q, bvec, offset))
			return 0;
	}

	if (bio_full(bio, len))
		return 0;

	if (bio->bi_vcnt >= queue_max_segments(q))
		return 0;

	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_zone_append_page - attempt to add page to zone-append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
 * for a zone-append request. This can fail for a number of reasons, such as the
 * bio being full or the target block device is not a zoned block device or
 * other limitations of the target block device. The target block device must
 * allow bio's up to PAGE_SIZE, so it is always possible to add a single page
 * to an empty bio.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	bool same_page = false;

	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return 0;

	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];

	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bv->bv_page = page;
	bv->bv_offset = off;
	bv->bv_len = len;

	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;

	if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
		bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) {
		if (bio_full(bio, len))
			return 0;
		__bio_add_page(bio, page, len, offset);
	}
	return len;
}
EXPORT_SYMBOL(bio_add_page);
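/*
 * Example (sketch): filling a bio page by page and stopping when it is
 * full; "pages" and "npages" are hypothetical caller-owned values:
 *
 *	for (i = 0; i < npages; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;
 *
 * A short return means the bio is full: submit it and continue with a
 * fresh one.
 */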
/**
 * bio_add_folio - Attempt to add part of a folio to a bio.
 * @bio: BIO to add to.
 * @folio: Folio to add.
 * @len: How many bytes from the folio to add.
 * @off: First byte in this folio to add.
 *
 * Filesystems that use folios can call this function instead of calling
 * bio_add_page() for each page in the folio. If @off is bigger than
 * PAGE_SIZE, this function can create a bio_vec that starts in a page
 * after the bv_page. BIOs do not support folios that are 4GiB or larger.
 *
 * Return: Whether the addition was successful.
 */
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
		   size_t off)
{
	if (len > UINT_MAX || off > UINT_MAX)
		return false;
	return bio_add_page(bio, &folio->page, len, off) > 0;
}

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct bvec_iter_all iter_all;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		if (mark_dirty && !PageCompound(bvec->bv_page))
			set_page_dirty_lock(bvec->bv_page);
		put_page(bvec->bv_page);
	}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);

void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	size_t size = iov_iter_count(iter);

	WARN_ON_ONCE(bio->bi_max_vecs);

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		size_t max_sectors = queue_max_zone_append_sectors(q);

		size = min(size, max_sectors << SECTOR_SHIFT);
	}

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = size;
	bio_set_flag(bio, BIO_NO_PAGE_REF);
	bio_set_flag(bio, BIO_CLONED);
}

static void bio_put_pages(struct page **pages, size_t size, size_t off)
{
	size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);

	for (i = 0; i < nr; i++)
		put_page(pages[i]);
}

#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 * For multi-segment *iter, this function only adds pages from the
 * next non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	bool same_page = false;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);

		if (__bio_try_merge_page(bio, page, len, offset, &same_page)) {
			if (same_page)
				put_page(page);
		} else {
			if (WARN_ON_ONCE(bio_full(bio, len))) {
				bio_put_pages(pages + i, left, offset);
				return -EINVAL;
			}
			__bio_add_page(bio, page, len, offset);
		}
		offset = 0;
	}

	iov_iter_advance(iter, size);
	return 0;
}

static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int max_append_sectors = queue_max_zone_append_sectors(q);
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i;
	size_t offset;
	int ret = 0;

	if (WARN_ON_ONCE(!max_append_sectors))
		return 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];
		bool same_page = false;

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_add_hw_page(q, bio, page, len, offset,
				max_append_sectors, &same_page) != len) {
			bio_put_pages(pages + i, left, offset);
			ret = -EINVAL;
			break;
		}
		if (same_page)
			put_page(page);
		offset = 0;
	}

	iov_iter_advance(iter, size - left);
	return ret;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
 * to ensure the bvecs and pages stay referenced until the submitted I/O is
 * completed by a call to ->ki_complete() or returns with an error other than
 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
 * on IO completion. If it isn't, then pages should be released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 *
 * It's intended for direct IO, so doesn't do PSI tracking, the caller is
 * responsible for setting BIO_WORKINGSET if necessary.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	int ret = 0;

	if (iov_iter_is_bvec(iter)) {
		bio_iov_bvec_set(bio, iter);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
		return 0;
	}

	do {
		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
			ret = __bio_iov_append_get_pages(bio, iter);
		else
			ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	/* don't account direct I/O as memory stall */
	bio_clear_flag(bio, BIO_WORKINGSET);
	return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
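/*
 * Example (sketch): direct I/O maps a user iterator straight into a bio;
 * on completion the pages are dropped with bio_release_pages(), which is
 * a no-op when BIO_NO_PAGE_REF is set:
 *
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (ret)
 *		return ret;
 *	submit_bio(bio);
 *
 * and later, in the completion handler:
 *
 *	bio_release_pages(bio, false);
 */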
static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}
/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * result in the bio reference being consumed. The caller must drop the
 * reference on their own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);
	unsigned long hang_check;

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_io_timeout(&done,
					hang_check * (HZ/2)))
			;
	else
		wait_for_completion_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
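/*
 * Example (sketch): synchronously reading one page; "sector" and "page"
 * are assumed caller-provided, and note that the caller still owns the
 * bio reference after submit_bio_wait() returns:
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */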
void __bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_crypt_advance(bio, bytes);
	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(__bio_advance);

void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			struct bio *src, struct bvec_iter *src_iter)
{
	while (src_iter->bi_size && dst_iter->bi_size) {
		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
		void *src_buf;

		src_buf = bvec_kmap_local(&src_bv);
		memcpy_to_bvec(&dst_bv, src_buf);
		kunmap_local(src_buf);

		bio_advance_iter_single(src, src_iter, bytes);
		bio_advance_iter_single(dst, dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data_iter);

/**
 * bio_copy_data - copy contents of data buffers from one bio to another
 * @src: source bio
 * @dst: destination bio
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;

	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
}
EXPORT_SYMBOL(bio_copy_data);
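/*
 * Example (sketch): copying a completed read from a driver-private bounce
 * bio back into the original before completing it; "bounce_bio" and
 * "orig_bio" are hypothetical names, and note the destination comes first:
 *
 *	bio_copy_data(orig_bio, bounce_bio);
 *	bio_endio(orig_bio);
 */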
1363 | ||
491221f8 | 1364 | void bio_free_pages(struct bio *bio) |
1dfa0f68 CH |
1365 | { |
1366 | struct bio_vec *bvec; | |
6dc4f100 | 1367 | struct bvec_iter_all iter_all; |
1dfa0f68 | 1368 | |
2b070cfe | 1369 | bio_for_each_segment_all(bvec, bio, iter_all) |
1dfa0f68 CH |
1370 | __free_page(bvec->bv_page); |
1371 | } | |
491221f8 | 1372 | EXPORT_SYMBOL(bio_free_pages); |
1dfa0f68 | 1373 | |
1da177e4 LT |
1374 | /* |
1375 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions | |
1376 | * for performing direct-IO in BIOs. | |
1377 | * | |
1378 | * The problem is that we cannot run set_page_dirty() from interrupt context | |
1379 | * because the required locks are not interrupt-safe. So what we can do is to | |
1380 | * mark the pages dirty _before_ performing IO. And in interrupt context, | |
1381 | * check that the pages are still dirty. If so, fine. If not, redirty them | |
1382 | * in process context. | |
1383 | * | |
1384 | * We special-case compound pages here: normally this means reads into hugetlb | |
1385 | * pages. The logic in here doesn't really work right for compound pages | |
1386 | * because the VM does not uniformly chase down the head page in all cases. | |
1387 | * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't | |
1388 | * handle them at all. So we skip compound pages here at an early stage. | |
1389 | * | |
1390 | * Note that this code is very hard to test under normal circumstances because | |
1391 | * direct-io pins the pages with get_user_pages(). This makes | |
1392 | * is_page_cache_freeable return false, and the VM will not clean the pages. | |
0d5c3eba | 1393 | * But other code (eg, flusher threads) could clean the pages if they are mapped |
1da177e4 LT |
1394 | * pagecache. |
1395 | * | |
1396 | * Simply disabling the call to bio_set_pages_dirty() is a good way to test the | |
1397 | * deferred bio dirtying paths. | |
1398 | */ | |
1399 | ||
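A hedged sketch of the calling convention the comment above describes, with hypothetical function names: dirty the pages before submission, and let the completion side re-dirty anything the VM cleaned in the meantime.

/*
 * Hypothetical sketch of the deferred-dirtying protocol for a direct-IO
 * read.  bio_check_pages_dirty() takes over ownership: it drops the page
 * references and the bio itself, deferring to a workqueue when it finds
 * clean pages while running in interrupt context.
 */
static void dio_read_end_io(struct bio *bio)
{
	bio_check_pages_dirty(bio);	/* may run in hard/soft IRQ */
}

static void dio_submit_read(struct bio *bio)
{
	bio->bi_end_io = dio_read_end_io;
	bio_set_pages_dirty(bio);	/* mark dirty *before* the IO starts */
	submit_bio(bio);
}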
1400 | /* | |
1401 | * bio_set_pages_dirty() will mark all the bio's pages as dirty. | |
1402 | */ | |
1403 | void bio_set_pages_dirty(struct bio *bio) | |
1404 | { | |
cb34e057 | 1405 | struct bio_vec *bvec; |
6dc4f100 | 1406 | struct bvec_iter_all iter_all; |
1da177e4 | 1407 | |
2b070cfe | 1408 | bio_for_each_segment_all(bvec, bio, iter_all) { |
3bb50983 CH |
1409 | if (!PageCompound(bvec->bv_page)) |
1410 | set_page_dirty_lock(bvec->bv_page); | |
1da177e4 LT |
1411 | } |
1412 | } | |
1413 | ||
1da177e4 LT |
1414 | /* |
1415 | * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. | |
1416 | * If they are, then fine. If, however, some pages are clean, then they must |
1417 | * have been written out during the direct-IO read. So we take another ref on | |
24d5493f | 1418 | * the BIO and re-dirty the pages in process context. |
1da177e4 LT |
1419 | * |
1420 | * It is expected that bio_check_pages_dirty() will wholly own the BIO from | |
ea1754a0 KS |
1421 | * here on. It will run one put_page() against each page and will run one |
1422 | * bio_put() against the BIO. | |
1da177e4 LT |
1423 | */ |
1424 | ||
65f27f38 | 1425 | static void bio_dirty_fn(struct work_struct *work); |
1da177e4 | 1426 | |
65f27f38 | 1427 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); |
1da177e4 LT |
1428 | static DEFINE_SPINLOCK(bio_dirty_lock); |
1429 | static struct bio *bio_dirty_list; | |
1430 | ||
1431 | /* | |
1432 | * This runs in process context | |
1433 | */ | |
65f27f38 | 1434 | static void bio_dirty_fn(struct work_struct *work) |
1da177e4 | 1435 | { |
24d5493f | 1436 | struct bio *bio, *next; |
1da177e4 | 1437 | |
24d5493f CH |
1438 | spin_lock_irq(&bio_dirty_lock); |
1439 | next = bio_dirty_list; | |
1da177e4 | 1440 | bio_dirty_list = NULL; |
24d5493f | 1441 | spin_unlock_irq(&bio_dirty_lock); |
1da177e4 | 1442 | |
24d5493f CH |
1443 | while ((bio = next) != NULL) { |
1444 | next = bio->bi_private; | |
1da177e4 | 1445 | |
d241a95f | 1446 | bio_release_pages(bio, true); |
1da177e4 | 1447 | bio_put(bio); |
1da177e4 LT |
1448 | } |
1449 | } | |
1450 | ||
1451 | void bio_check_pages_dirty(struct bio *bio) | |
1452 | { | |
cb34e057 | 1453 | struct bio_vec *bvec; |
24d5493f | 1454 | unsigned long flags; |
6dc4f100 | 1455 | struct bvec_iter_all iter_all; |
1da177e4 | 1456 | |
2b070cfe | 1457 | bio_for_each_segment_all(bvec, bio, iter_all) { |
24d5493f CH |
1458 | if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page)) |
1459 | goto defer; | |
1da177e4 LT |
1460 | } |
1461 | ||
d241a95f | 1462 | bio_release_pages(bio, false); |
24d5493f CH |
1463 | bio_put(bio); |
1464 | return; | |
1465 | defer: | |
1466 | spin_lock_irqsave(&bio_dirty_lock, flags); | |
1467 | bio->bi_private = bio_dirty_list; | |
1468 | bio_dirty_list = bio; | |
1469 | spin_unlock_irqrestore(&bio_dirty_lock, flags); | |
1470 | schedule_work(&bio_dirty_work); | |
1da177e4 LT |
1471 | } |
1472 | ||
c4cf5261 JA |
1473 | static inline bool bio_remaining_done(struct bio *bio) |
1474 | { | |
1475 | /* | |
1476 | * If we're not chaining, then ->__bi_remaining is always 1 and | |
1477 | * we always end io on the first invocation. | |
1478 | */ | |
1479 | if (!bio_flagged(bio, BIO_CHAIN)) | |
1480 | return true; | |
1481 | ||
1482 | BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); | |
1483 | ||
326e1dbb | 1484 | if (atomic_dec_and_test(&bio->__bi_remaining)) { |
b7c44ed9 | 1485 | bio_clear_flag(bio, BIO_CHAIN); |
c4cf5261 | 1486 | return true; |
326e1dbb | 1487 | } |
c4cf5261 JA |
1488 | |
1489 | return false; | |
1490 | } | |
1491 | ||
1da177e4 LT |
1492 | /** |
1493 | * bio_endio - end I/O on a bio | |
1494 | * @bio: bio | |
1da177e4 LT |
1495 | * |
1496 | * Description: | |
4246a0b6 CH |
1497 | * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred |
1498 | * way to end I/O on a bio. No one should call bi_end_io() directly on a | |
1499 | * bio unless they own it and thus know that it has an end_io function. | |
fbbaf700 N |
1500 | * |
1501 | * bio_endio() can be called several times on a bio that has been chained | |
1502 | * using bio_chain(). The ->bi_end_io() function will only be called the | |
60b6a7e6 | 1503 | * last time. |
1da177e4 | 1504 | **/ |
4246a0b6 | 1505 | void bio_endio(struct bio *bio) |
1da177e4 | 1506 | { |
ba8c6967 | 1507 | again: |
2b885517 | 1508 | if (!bio_remaining_done(bio)) |
ba8c6967 | 1509 | return; |
7c20f116 CH |
1510 | if (!bio_integrity_endio(bio)) |
1511 | return; | |
1da177e4 | 1512 | |
a647a524 | 1513 | if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED)) |
3caee463 | 1514 | rq_qos_done_bio(bdev_get_queue(bio->bi_bdev), bio); |
67b42d0b | 1515 | |
60b6a7e6 | 1516 | if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { |
3caee463 | 1517 | trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); |
60b6a7e6 EH |
1518 | bio_clear_flag(bio, BIO_TRACE_COMPLETION); |
1519 | } | |
1520 | ||
ba8c6967 CH |
1521 | /* |
1522 | * Need to have a real endio function for chained bios, otherwise | |
1523 | * various corner cases will break (like stacking block devices that | |
1524 | * save/restore bi_end_io) - however, we want to avoid unbounded | |
1525 | * recursion and blowing the stack. Tail call optimization would | |
1526 | * handle this, but compiling with frame pointers also disables | |
1527 | * gcc's sibling call optimization. | |
1528 | */ | |
1529 | if (bio->bi_end_io == bio_chain_endio) { | |
1530 | bio = __bio_chain_endio(bio); | |
1531 | goto again; | |
196d38bc | 1532 | } |
ba8c6967 | 1533 | |
9e234eea | 1534 | blk_throtl_bio_endio(bio); |
b222dd2f SL |
1535 | /* release cgroup info */ |
1536 | bio_uninit(bio); | |
ba8c6967 CH |
1537 | if (bio->bi_end_io) |
1538 | bio->bi_end_io(bio); | |
1da177e4 | 1539 | } |
a112a71d | 1540 | EXPORT_SYMBOL(bio_endio); |
1da177e4 | 1541 | |
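For illustration, a minimal owner-side pairing with bio_endio(): the submitter installs ->bi_end_io and sleeps until the completion path, which ends in bio_endio(), fires it. This mirrors what submit_bio_wait() does; the names below are hypothetical.

/*
 * Hypothetical sketch: synchronous submission built on ->bi_end_io.
 * The caller keeps ownership of @bio and puts it after this returns.
 */
static void my_wait_end_io(struct bio *bio)
{
	complete(bio->bi_private);	/* wake the submitter */
}

static int my_submit_and_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK(done);

	bio->bi_private = &done;
	bio->bi_end_io = my_wait_end_io;
	submit_bio(bio);
	wait_for_completion(&done);

	return blk_status_to_errno(bio->bi_status);
}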
20d0189b KO |
1542 | /** |
1543 | * bio_split - split a bio | |
1544 | * @bio: bio to split | |
1545 | * @sectors: number of sectors to split from the front of @bio | |
1546 | * @gfp: gfp mask | |
1547 | * @bs: bio set to allocate from | |
1548 | * | |
1549 | * Allocates and returns a new bio which represents @sectors from the start of | |
1550 | * @bio, and updates @bio to represent the remaining sectors. | |
1551 | * | |
f3f5da62 | 1552 | * Unless this is a discard request, the newly allocated bio will point |
dad77584 BVA |
1553 | * to @bio's bi_io_vec. It is the caller's responsibility to ensure that |
1554 | * neither @bio nor @bs is freed before the split bio. |
20d0189b KO |
1555 | */ |
1556 | struct bio *bio_split(struct bio *bio, int sectors, | |
1557 | gfp_t gfp, struct bio_set *bs) | |
1558 | { | |
f341a4d3 | 1559 | struct bio *split; |
20d0189b KO |
1560 | |
1561 | BUG_ON(sectors <= 0); | |
1562 | BUG_ON(sectors >= bio_sectors(bio)); | |
1563 | ||
0512a75b KB |
1564 | /* Zone append commands cannot be split */ |
1565 | if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) | |
1566 | return NULL; | |
1567 | ||
f9d03f96 | 1568 | split = bio_clone_fast(bio, gfp, bs); |
20d0189b KO |
1569 | if (!split) |
1570 | return NULL; | |
1571 | ||
1572 | split->bi_iter.bi_size = sectors << 9; | |
1573 | ||
1574 | if (bio_integrity(split)) | |
fbd08e76 | 1575 | bio_integrity_trim(split); |
20d0189b KO |
1576 | |
1577 | bio_advance(bio, split->bi_iter.bi_size); | |
1578 | ||
fbbaf700 | 1579 | if (bio_flagged(bio, BIO_TRACE_COMPLETION)) |
20d59023 | 1580 | bio_set_flag(split, BIO_TRACE_COMPLETION); |
fbbaf700 | 1581 | |
20d0189b KO |
1582 | return split; |
1583 | } | |
1584 | EXPORT_SYMBOL(bio_split); | |
1585 | ||
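A hedged sketch of the usual split-and-chain pattern (compare __blk_queue_split()): the front part is chained to the remainder so the original ->bi_end_io runs only once both halves complete. The function name is hypothetical, and the caller must guarantee 0 < sectors < bio_sectors(bio) per the BUG_ON checks above.

/*
 * Hypothetical sketch: issue the first @sectors of @bio separately,
 * chaining it so @bio completes only after both pieces do.
 */
static void submit_in_two_pieces(struct bio *bio, int sectors)
{
	struct bio *split = bio_split(bio, sectors, GFP_NOIO, &fs_bio_set);

	if (split) {
		bio_chain(split, bio);	/* bumps @bio's __bi_remaining */
		submit_bio_noacct(split);
	}
	submit_bio_noacct(bio);		/* the remaining sectors */
}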
6678d83f KO |
1586 | /** |
1587 | * bio_trim - trim a bio | |
1588 | * @bio: bio to trim | |
1589 | * @offset: number of sectors to trim from the front of @bio | |
1590 | * @size: size we want to trim @bio to, in sectors | |
e83502ca CK |
1591 | * |
1592 | * This function is typically used for bios that are cloned and submitted | |
1593 | * to the underlying device in parts. | |
6678d83f | 1594 | */ |
e83502ca | 1595 | void bio_trim(struct bio *bio, sector_t offset, sector_t size) |
6678d83f | 1596 | { |
e83502ca CK |
1597 | if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS || |
1598 | offset + size > bio->bi_iter.bi_size)) | |
1599 | return; | |
6678d83f KO |
1600 | |
1601 | size <<= 9; | |
4f024f37 | 1602 | if (offset == 0 && size == bio->bi_iter.bi_size) |
6678d83f KO |
1603 | return; |
1604 | ||
6678d83f | 1605 | bio_advance(bio, offset << 9); |
4f024f37 | 1606 | bio->bi_iter.bi_size = size; |
376a78ab DM |
1607 | |
1608 | if (bio_integrity(bio)) | |
fbd08e76 | 1609 | bio_integrity_trim(bio); |
6678d83f KO |
1610 | } |
1611 | EXPORT_SYMBOL_GPL(bio_trim); | |
1612 | ||
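A sketch of the clone-then-trim pattern the description above refers to, as stacking drivers use when forwarding a slice of a bio to an underlying device; clone_bio_slice is a hypothetical name.

/*
 * Hypothetical sketch: clone @bio and reduce the clone to @nr_sects
 * sectors starting @offset sectors in, leaving @bio untouched.
 */
static struct bio *clone_bio_slice(struct bio *bio, sector_t offset,
				   sector_t nr_sects, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (clone)
		bio_trim(clone, offset, nr_sects);	/* both in sectors */
	return clone;
}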
1da177e4 LT |
1613 | /* |
1614 | * Create memory pools for biovecs in a bio_set. |
1615 | * Use the global biovec slabs created for general use. |
1616 | */ | |
8aa6ba2f | 1617 | int biovec_init_pool(mempool_t *pool, int pool_entries) |
1da177e4 | 1618 | { |
7a800a20 | 1619 | struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1; |
1da177e4 | 1620 | |
8aa6ba2f | 1621 | return mempool_init_slab_pool(pool, pool_entries, bp->slab); |
1da177e4 LT |
1622 | } |
1623 | ||
917a38c7 KO |
1624 | /* |
1625 | * bioset_exit - exit a bioset initialized with bioset_init() | |
1626 | * | |
1627 | * May be called on a zeroed but uninitialized bioset (i.e. allocated with | |
1628 | * kzalloc()). | |
1629 | */ | |
1630 | void bioset_exit(struct bio_set *bs) | |
1da177e4 | 1631 | { |
be4d234d | 1632 | bio_alloc_cache_destroy(bs); |
df2cb6da KO |
1633 | if (bs->rescue_workqueue) |
1634 | destroy_workqueue(bs->rescue_workqueue); | |
917a38c7 | 1635 | bs->rescue_workqueue = NULL; |
df2cb6da | 1636 | |
8aa6ba2f KO |
1637 | mempool_exit(&bs->bio_pool); |
1638 | mempool_exit(&bs->bvec_pool); | |
9f060e22 | 1639 | |
7878cba9 | 1640 | bioset_integrity_free(bs); |
917a38c7 KO |
1641 | if (bs->bio_slab) |
1642 | bio_put_slab(bs); | |
1643 | bs->bio_slab = NULL; | |
1644 | } | |
1645 | EXPORT_SYMBOL(bioset_exit); | |
1da177e4 | 1646 | |
917a38c7 KO |
1647 | /** |
1648 | * bioset_init - Initialize a bio_set | |
dad08527 | 1649 | * @bs: pool to initialize |
917a38c7 KO |
1650 | * @pool_size: Number of bios and bio_vecs to cache in the mempool |
1651 | * @front_pad: Number of bytes to allocate in front of the returned bio | |
1652 | * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS | |
1653 | * and %BIOSET_NEED_RESCUER | |
1654 | * | |
dad08527 KO |
1655 | * Description: |
1656 | * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller | |
1657 | * to ask for a number of bytes to be allocated in front of the bio. | |
1658 | * Front pad allocation is useful for embedding the bio inside | |
1659 | * another structure, to avoid allocating extra data to go with the bio. | |
1660 | * Note that the bio must always be embedded at the END of that structure, |
1661 | * or things will break badly. |
1662 | * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated | |
1663 | * for allocating iovecs. This pool is not needed e.g. for bio_clone_fast(). | |
1664 | * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to | |
1665 | * dispatch queued requests when the mempool runs out of space. | |
1666 | * | |
917a38c7 KO |
1667 | */ |
1668 | int bioset_init(struct bio_set *bs, | |
1669 | unsigned int pool_size, | |
1670 | unsigned int front_pad, | |
1671 | int flags) | |
1672 | { | |
917a38c7 | 1673 | bs->front_pad = front_pad; |
9f180e31 ML |
1674 | if (flags & BIOSET_NEED_BVECS) |
1675 | bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); | |
1676 | else | |
1677 | bs->back_pad = 0; | |
917a38c7 KO |
1678 | |
1679 | spin_lock_init(&bs->rescue_lock); | |
1680 | bio_list_init(&bs->rescue_list); | |
1681 | INIT_WORK(&bs->rescue_work, bio_alloc_rescue); | |
1682 | ||
49d1ec85 | 1683 | bs->bio_slab = bio_find_or_create_slab(bs); |
917a38c7 KO |
1684 | if (!bs->bio_slab) |
1685 | return -ENOMEM; | |
1686 | ||
1687 | if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) | |
1688 | goto bad; | |
1689 | ||
1690 | if ((flags & BIOSET_NEED_BVECS) && | |
1691 | biovec_init_pool(&bs->bvec_pool, pool_size)) | |
1692 | goto bad; | |
1693 | ||
be4d234d JA |
1694 | if (flags & BIOSET_NEED_RESCUER) { |
1695 | bs->rescue_workqueue = alloc_workqueue("bioset", | |
1696 | WQ_MEM_RECLAIM, 0); | |
1697 | if (!bs->rescue_workqueue) | |
1698 | goto bad; | |
1699 | } | |
1700 | if (flags & BIOSET_PERCPU_CACHE) { | |
1701 | bs->cache = alloc_percpu(struct bio_alloc_cache); | |
1702 | if (!bs->cache) | |
1703 | goto bad; | |
1704 | cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); | |
1705 | } | |
917a38c7 KO |
1706 | |
1707 | return 0; | |
1708 | bad: | |
1709 | bioset_exit(bs); | |
1710 | return -ENOMEM; | |
1711 | } | |
1712 | EXPORT_SYMBOL(bioset_init); | |
1713 | ||
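A hedged sketch of the front_pad idiom described above: embed the bio at the end of a driver-private structure and recover the container in the completion path. struct my_io and the helpers are hypothetical illustrations.

/*
 * Hypothetical sketch: per-IO driver state allocated together with the
 * bio via front_pad.  The bio must stay the last member.
 */
struct my_io {
	void		*driver_data;
	struct bio	bio;	/* last member: front_pad sits before it */
};

static struct bio_set my_bio_set;

static int __init my_io_pool_init(void)
{
	return bioset_init(&my_bio_set, BIO_POOL_SIZE,
			   offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
}

static struct my_io *my_io_from_bio(struct bio *bio)
{
	return container_of(bio, struct my_io, bio);
}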
28e89fd9 JA |
1714 | /* |
1715 | * Initialize and set up a new bio_set, based on the settings from |
1716 | * another bio_set. | |
1717 | */ | |
1718 | int bioset_init_from_src(struct bio_set *bs, struct bio_set *src) | |
1719 | { | |
1720 | int flags; | |
1721 | ||
1722 | flags = 0; | |
1723 | if (src->bvec_pool.min_nr) | |
1724 | flags |= BIOSET_NEED_BVECS; | |
1725 | if (src->rescue_workqueue) | |
1726 | flags |= BIOSET_NEED_RESCUER; | |
1727 | ||
1728 | return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags); | |
1729 | } | |
1730 | EXPORT_SYMBOL(bioset_init_from_src); | |
1731 | ||
be4d234d JA |
1732 | /** |
1733 | * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb | |
1734 | * @kiocb: kiocb describing the IO | |
b77c88c2 | 1735 | * @bdev: block device to allocate the bio for (can be %NULL) |
0ef47db1 | 1736 | * @nr_vecs: number of iovecs to pre-allocate |
b77c88c2 | 1737 | * @opf: operation and flags for bio |
be4d234d JA |
1738 | * @bs: bio_set to allocate from |
1739 | * | |
1740 | * Description: | |
1741 | * Like @bio_alloc_bioset, but pass in the kiocb. The kiocb is only | |
1742 | * used to check if we should dip into the per-cpu bio_set allocation | |
3d5b3fbe JA |
1743 | * cache. The allocation uses GFP_KERNEL internally. On return, the |
1744 | * bio is marked BIO_PERCPU_CACHE, and the final put of the bio |
1745 | * MUST be done from process context, not hard/soft IRQ. | |
be4d234d JA |
1746 | * |
1747 | */ | |
b77c88c2 CH |
1748 | struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev, |
1749 | unsigned short nr_vecs, unsigned int opf, struct bio_set *bs) | |
be4d234d JA |
1750 | { |
1751 | struct bio_alloc_cache *cache; | |
1752 | struct bio *bio; | |
1753 | ||
1754 | if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS) | |
b77c88c2 | 1755 | return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs); |
be4d234d JA |
1756 | |
1757 | cache = per_cpu_ptr(bs->cache, get_cpu()); | |
fcade2ce JA |
1758 | if (cache->free_list) { |
1759 | bio = cache->free_list; | |
1760 | cache->free_list = bio->bi_next; | |
be4d234d JA |
1761 | cache->nr--; |
1762 | put_cpu(); | |
49add496 CH |
1763 | bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, |
1764 | nr_vecs, opf); | |
be4d234d JA |
1765 | bio->bi_pool = bs; |
1766 | bio_set_flag(bio, BIO_PERCPU_CACHE); | |
1767 | return bio; | |
1768 | } | |
1769 | put_cpu(); | |
b77c88c2 | 1770 | bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, bs); |
be4d234d JA |
1771 | bio_set_flag(bio, BIO_PERCPU_CACHE); |
1772 | return bio; | |
1773 | } | |
1774 | EXPORT_SYMBOL_GPL(bio_alloc_kiocb); | |
1775 | ||
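A hedged sketch of a caller, modelled loosely on the block-device direct-IO path: the bioset must have been initialized with BIOSET_PERCPU_CACHE for the per-cpu cache to exist, and, per the description above, the completion path must arrange for the final bio_put() to happen in process context. All names here are hypothetical.

/* Assumed: bioset_init(..., BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE). */
static struct bio_set my_dio_set;

static struct bio *my_dio_bio_alloc(struct kiocb *iocb,
				    struct block_device *bdev,
				    unsigned short nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc_kiocb(iocb, bdev, nr_vecs, REQ_OP_READ, &my_dio_set);
	bio->bi_iter.bi_sector = iocb->ki_pos >> SECTOR_SHIFT;
	/* caller adds pages, sets ->bi_end_io, then submits */
	return bio;
}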
de76fd89 | 1776 | static int __init init_bio(void) |
1da177e4 LT |
1777 | { |
1778 | int i; | |
1779 | ||
7878cba9 | 1780 | bio_integrity_init(); |
1da177e4 | 1781 | |
de76fd89 CH |
1782 | for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) { |
1783 | struct biovec_slab *bvs = bvec_slabs + i; | |
a7fcd37c | 1784 | |
de76fd89 CH |
1785 | bvs->slab = kmem_cache_create(bvs->name, |
1786 | bvs->nr_vecs * sizeof(struct bio_vec), 0, | |
1787 | SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | |
1da177e4 | 1788 | } |
1da177e4 | 1789 | |
be4d234d JA |
1790 | cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL, |
1791 | bio_cpu_dead); | |
1792 | ||
f4f8154a | 1793 | if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS)) |
1da177e4 LT |
1794 | panic("bio: can't allocate bios\n"); |
1795 | ||
f4f8154a | 1796 | if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE)) |
a91a2785 MP |
1797 | panic("bio: can't create integrity pool\n"); |
1798 | ||
1da177e4 LT |
1799 | return 0; |
1800 | } | |
1da177e4 | 1801 | subsys_initcall(init_bio); |