// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/highmem.h>
#include <linux/blk-crypto.h>
#include <linux/xarray.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-cgroup.h"

#define ALLOC_CACHE_THRESHOLD	16
#define ALLOC_CACHE_MAX		256

struct bio_alloc_cache {
	struct bio		*free_list;
	struct bio		*free_list_irq;
	unsigned int		nr;
	unsigned int		nr_irq;
};

static struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
} bvec_slabs[] __read_mostly = {
	{ .nr_vecs = 16, .name = "biovec-16" },
	{ .nr_vecs = 64, .name = "biovec-64" },
	{ .nr_vecs = 128, .name = "biovec-128" },
	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
};

static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
{
	switch (nr_vecs) {
	/* smaller bios use inline vecs */
	case 5 ... 16:
		return &bvec_slabs[0];
	case 17 ... 64:
		return &bvec_slabs[1];
	case 65 ... 128:
		return &bvec_slabs[2];
	case 129 ... BIO_MAX_VECS:
		return &bvec_slabs[3];
	default:
		BUG();
		return NULL;
	}
}

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static DEFINE_XARRAY(bio_slabs);

static struct bio_slab *create_bio_slab(unsigned int size)
{
	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);

	if (!bslab)
		return NULL;

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
	bslab->slab = kmem_cache_create(bslab->name, size,
			ARCH_KMALLOC_MINALIGN,
			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
	if (!bslab->slab)
		goto fail_alloc_slab;

	bslab->slab_ref = 1;
	bslab->slab_size = size;

	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
		return bslab;

	kmem_cache_destroy(bslab->slab);

fail_alloc_slab:
	kfree(bslab);
	return NULL;
}

static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
{
	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
}

static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
{
	unsigned int size = bs_bio_slab_size(bs);
	struct bio_slab *bslab;

	mutex_lock(&bio_slab_lock);
	bslab = xa_load(&bio_slabs, size);
	if (bslab)
		bslab->slab_ref++;
	else
		bslab = create_bio_slab(size);
	mutex_unlock(&bio_slab_lock);

	if (bslab)
		return bslab->slab;
	return NULL;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int slab_size = bs_bio_slab_size(bs);

	mutex_lock(&bio_slab_lock);

	bslab = xa_load(&bio_slabs, slab_size);
	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON_ONCE(bslab->slab != bs->bio_slab);

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	xa_erase(&bio_slabs, slab_size);

	kmem_cache_destroy(bslab->slab);
	kfree(bslab);

out:
	mutex_unlock(&bio_slab_lock);
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
{
	BUG_ON(nr_vecs > BIO_MAX_VECS);

	if (nr_vecs == BIO_MAX_VECS)
		mempool_free(bv, pool);
	else if (nr_vecs > BIO_INLINE_VECS)
		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
}

/*
 * Make the first allocation restricted and don't dump info on allocation
 * failures, since we'll fall back to the mempool in case of failure.
 */
static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
{
	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
}

struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask)
{
	struct biovec_slab *bvs = biovec_slab(*nr_vecs);

	if (WARN_ON_ONCE(!bvs))
		return NULL;

	/*
	 * Upgrade the nr_vecs request to take full advantage of the allocation.
	 * We also rely on this in the bvec_free path.
	 */
	*nr_vecs = bvs->nr_vecs;

	/*
	 * Try a slab allocation first for all smaller allocations.  If that
	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
	 */
	if (*nr_vecs < BIO_MAX_VECS) {
		struct bio_vec *bvl;

		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
			return bvl;
		*nr_vecs = BIO_MAX_VECS;
	}

	return mempool_alloc(pool, gfp_mask);
}

void bio_uninit(struct bio *bio)
{
#ifdef CONFIG_BLK_CGROUP
	if (bio->bi_blkg) {
		blkg_put(bio->bi_blkg);
		bio->bi_blkg = NULL;
	}
#endif
	if (bio_integrity(bio))
		bio_integrity_free(bio);

	bio_crypt_free_ctx(bio);
}
EXPORT_SYMBOL(bio_uninit);

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p = bio;

	WARN_ON_ONCE(!bs);

	bio_uninit(bio);
	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
	mempool_free(p - bs->front_pad, &bs->bio_pool);
}

/*
 * Users of this function have their own bio allocation. Subsequently,
 * they must remember to pair any call to bio_init() with bio_uninit()
 * when IO has completed, or when the bio is released.
 */
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf)
{
	bio->bi_next = NULL;
	bio->bi_bdev = bdev;
	bio->bi_opf = opf;
	bio->bi_flags = 0;
	bio->bi_ioprio = 0;
	bio->bi_status = 0;
	bio->bi_iter.bi_sector = 0;
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_idx = 0;
	bio->bi_iter.bi_bvec_done = 0;
	bio->bi_end_io = NULL;
	bio->bi_private = NULL;
#ifdef CONFIG_BLK_CGROUP
	bio->bi_blkg = NULL;
	bio->bi_issue.value = 0;
	if (bdev)
		bio_associate_blkg(bio);
#ifdef CONFIG_BLK_CGROUP_IOCOST
	bio->bi_iocost_cost = 0;
#endif
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	bio->bi_crypt_context = NULL;
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
	bio->bi_integrity = NULL;
#endif
	bio->bi_vcnt = 0;

	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
	bio->bi_cookie = BLK_QC_T_NONE;

	bio->bi_max_vecs = max_vecs;
	bio->bi_io_vec = table;
	bio->bi_pool = NULL;
}
EXPORT_SYMBOL(bio_init);
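
/*
 * Illustrative sketch (not part of this file): a caller that embeds a bio and
 * a single inline bvec in its own request structure would pair bio_init()
 * with bio_uninit() roughly like this; struct my_request and the variables
 * used here are hypothetical:
 *
 *	struct my_request {
 *		struct bio	bio;
 *		struct bio_vec	bvec;
 *	};
 *
 *	bio_init(&rq->bio, bdev, &rq->bvec, 1, REQ_OP_WRITE);
 *	__bio_add_page(&rq->bio, page, PAGE_SIZE, 0);
 *	...
 *	bio_uninit(&rq->bio);	// once the IO has completed
 */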

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 * @bdev:	block device to use the bio for
 * @opf:	operation and flags for bio
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
{
	bio_uninit(bio);
	memset(bio, 0, BIO_RESET_BYTES);
	atomic_set(&bio->__bi_remaining, 1);
	bio->bi_bdev = bdev;
	if (bio->bi_bdev)
		bio_associate_blkg(bio);
	bio->bi_opf = opf;
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (bio->bi_status && !parent->bi_status)
		parent->bi_status = bio->bi_status;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the parent bio of @bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
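
/*
 * Illustrative sketch (not part of this file): the usual split-and-chain
 * pattern used by callers that cannot handle @bio in one go; max_sectors is
 * a hypothetical limit computed by the caller:
 *
 *	struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, &fs_bio_set);
 *
 *	bio_chain(split, bio);
 *	submit_bio_noacct(bio);		// remainder is resubmitted
 *	// process @split now; bio->bi_end_io runs once both parts complete
 */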

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
{
	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		submit_bio_noacct(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	if (WARN_ON_ONCE(!bs->rescue_workqueue))
		return;
	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(&current->bio_list[0])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[0] = nopunt;

	bio_list_init(&nopunt);
	while ((bio = bio_list_pop(&current->bio_list[1])))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
	current->bio_list[1] = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
{
	unsigned long flags;

	/* cache->free_list must be empty */
	if (WARN_ON_ONCE(cache->free_list))
		return;

	local_irq_save(flags);
	cache->free_list = cache->free_list_irq;
	cache->free_list_irq = NULL;
	cache->nr += cache->nr_irq;
	cache->nr_irq = 0;
	local_irq_restore(flags);
}

static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
		struct bio_set *bs)
{
	struct bio_alloc_cache *cache;
	struct bio *bio;

	cache = per_cpu_ptr(bs->cache, get_cpu());
	if (!cache->free_list) {
		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
			bio_alloc_irq_cache_splice(cache);
		if (!cache->free_list) {
			put_cpu();
			return NULL;
		}
	}
	bio = cache->free_list;
	cache->free_list = bio->bi_next;
	cache->nr--;
	put_cpu();

	bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
	bio->bi_pool = bs;
	return bio;
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @bdev:	block device to allocate the bio for (can be %NULL)
 * @nr_vecs:	number of bvecs to pre-allocate
 * @opf:	operation and flags for bio
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
 *
 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
 * allocate a bio.  This is due to the mempool guarantees.  To make this work,
 * callers must never allocate more than 1 bio at a time from the general pool.
 * Callers that need to allocate more than 1 bio must always submit the
 * previously allocated bio for IO before attempting to allocate a new one.
 * Failure to do so can cause deadlocks under memory pressure.
 *
 * Note that when running under submit_bio_noacct() (i.e. any block driver),
 * bios are not submitted until after you return - see the code in
 * submit_bio_noacct() that converts recursion into iteration, to prevent
 * stack overflows.
 *
 * This would normally mean allocating multiple bios under submit_bio_noacct()
 * would be susceptible to deadlocks, but we have
 * deadlock avoidance code that resubmits any blocked bios from a rescuer
 * thread.
 *
 * However, we do not guarantee forward progress for allocations from other
 * mempools. Doing multiple allocations from the same mempool under
 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
 * for per bio allocations.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_vecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
		return NULL;

	if (opf & REQ_ALLOC_CACHE) {
		if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
			bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
						     gfp_mask, bs);
			if (bio)
				return bio;
			/*
			 * No cached bio available, bio returned below marked with
			 * REQ_ALLOC_CACHE to participate in per-cpu alloc cache.
			 */
		} else {
			opf &= ~REQ_ALLOC_CACHE;
		}
	}

	/*
	 * submit_bio_noacct() converts recursion to iteration; this means if
	 * we're running beneath it, any bios we allocate and submit will not be
	 * submitted (and thus freed) until after we return.
	 *
	 * This exposes us to a potential deadlock if we allocate multiple bios
	 * from the same bio_set() while running underneath submit_bio_noacct().
	 * If we were to allocate multiple bios (say a stacking block driver
	 * that was splitting bios), we would deadlock if we exhausted the
	 * mempool's reserve.
	 *
	 * We solve this, and guarantee forward progress, with a rescuer
	 * workqueue per bio_set. If we go to allocate and there are bios on
	 * current->bio_list, we first try the allocation without
	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
	 * blocking to the rescuer workqueue before we retry with the original
	 * gfp_flags.
	 */
	if (current->bio_list &&
	    (!bio_list_empty(&current->bio_list[0]) ||
	     !bio_list_empty(&current->bio_list[1])) &&
	    bs->rescue_workqueue)
		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

	p = mempool_alloc(&bs->bio_pool, gfp_mask);
	if (!p && gfp_mask != saved_gfp) {
		punt_bios_to_rescuer(bs);
		gfp_mask = saved_gfp;
		p = mempool_alloc(&bs->bio_pool, gfp_mask);
	}
	if (unlikely(!p))
		return NULL;
	if (!mempool_is_saturated(&bs->bio_pool))
		opf &= ~REQ_ALLOC_CACHE;

	bio = p + bs->front_pad;
	if (nr_vecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bdev, bvl, nr_vecs, opf);
	} else if (nr_vecs) {
		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
	} else {
		bio_init(bio, bdev, NULL, 0, opf);
	}

	bio->bi_pool = bs;
	return bio;

err_free:
	mempool_free(p, &bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
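
/*
 * Illustrative sketch (not part of this file): typical filesystem-style usage
 * through the bio_alloc() wrapper, allocating from fs_bio_set one bio at a
 * time and submitting it before allocating the next one; my_end_io and the
 * local variables are hypothetical:
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOIO);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(bio);
 */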

/**
 * bio_kmalloc - kmalloc a bio
 * @nr_vecs: number of bio_vecs to allocate
 * @gfp_mask: the GFP_* mask given to the slab allocator
 *
 * Use kmalloc to allocate a bio (including bvecs).  The bio must be initialized
 * using bio_init() before use.  To free a bio returned from this function use
 * kfree() after calling bio_uninit().  A bio returned from this function can
 * be reused by calling bio_uninit() before calling bio_init() again.
 *
 * Note that unlike bio_alloc() or bio_alloc_bioset(), allocations from this
 * function are not backed by a mempool and can fail.  Do not use this function
 * for allocations in the file system I/O path.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (nr_vecs > UIO_MAXIOV)
		return NULL;
	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
}
EXPORT_SYMBOL(bio_kmalloc);
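
/*
 * Illustrative sketch (not part of this file): lifecycle of a kmalloc-backed
 * bio as described above; nr_vecs, bdev and page are hypothetical:
 *
 *	struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
 *
 *	if (!bio)
 *		return -ENOMEM;
 *	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	...
 *	bio_uninit(bio);
 *	kfree(bio);
 */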

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	__bio_for_each_segment(bv, bio, iter, start)
		memzero_bvec(&bv);
}
EXPORT_SYMBOL(zero_fill_bio_iter);

/**
 * bio_truncate - truncate the bio to the smaller size @new_size
 * @bio:	the bio to be truncated
 * @new_size:	new size for truncating the bio
 *
 * Description:
 *   Truncate the bio to the new size @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
static void bio_truncate(struct bio *bio, unsigned new_size)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int done = 0;
	bool truncated = false;

	if (new_size >= bio->bi_iter.bi_size)
		return;

	if (bio_op(bio) != REQ_OP_READ)
		goto exit;

	bio_for_each_segment(bv, bio, iter) {
		if (done + bv.bv_len > new_size) {
			unsigned offset;

			if (!truncated)
				offset = new_size - done;
			else
				offset = 0;
			zero_user(bv.bv_page, bv.bv_offset + offset,
				  bv.bv_len - offset);
			truncated = true;
		}
		done += bv.bv_len;
	}

 exit:
	/*
	 * Don't touch bvec table here and make it really immutable, since
	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
	 * in its .end_bio() callback.
	 *
	 * It is enough to truncate bio by updating .bi_size since we can make
	 * correct bvec with the updated .bi_size for drivers.
	 */
	bio->bi_iter.bi_size = new_size;
}

/**
 * guard_bio_eod - truncate a BIO to fit the block device
 * @bio:	bio to truncate
 *
 * This allows us to do IO even on the odd last sectors of a device, even if the
 * block size is some multiple of the physical sector size.
 *
 * We'll just truncate the bio to the size of the device, and clear the end of
 * the buffer head manually.  Truly out-of-range accesses will turn into actual
 * I/O errors, this only handles the "we need to be able to do I/O at the final
 * sector" case.
 */
void guard_bio_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);

	if (!maxsector)
		return;

	/*
	 * If the *whole* IO is past the end of the device,
	 * let it through, and the IO layer will turn it into
	 * an EIO.
	 */
	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
		return;

	maxsector -= bio->bi_iter.bi_sector;
	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
		return;

	bio_truncate(bio, maxsector << 9);
}

static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				   unsigned int nr)
{
	unsigned int i = 0;
	struct bio *bio;

	while ((bio = cache->free_list) != NULL) {
		cache->free_list = bio->bi_next;
		cache->nr--;
		bio_free(bio);
		if (++i == nr)
			break;
	}
	return i;
}

static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
				  unsigned int nr)
{
	nr -= __bio_alloc_cache_prune(cache, nr);
	if (!READ_ONCE(cache->free_list)) {
		bio_alloc_irq_cache_splice(cache);
		__bio_alloc_cache_prune(cache, nr);
	}
}

static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct bio_set *bs;

	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
	if (bs->cache) {
		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);

		bio_alloc_cache_prune(cache, -1U);
	}
	return 0;
}

static void bio_alloc_cache_destroy(struct bio_set *bs)
{
	int cpu;

	if (!bs->cache)
		return;

	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
	for_each_possible_cpu(cpu) {
		struct bio_alloc_cache *cache;

		cache = per_cpu_ptr(bs->cache, cpu);
		bio_alloc_cache_prune(cache, -1U);
	}
	free_percpu(bs->cache);
	bs->cache = NULL;
}

static inline void bio_put_percpu_cache(struct bio *bio)
{
	struct bio_alloc_cache *cache;

	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX)
		goto out_free;

	if (in_task()) {
		bio_uninit(bio);
		bio->bi_next = cache->free_list;
		/* Not necessary but helps not to iopoll already freed bios */
		bio->bi_bdev = NULL;
		cache->free_list = bio;
		cache->nr++;
	} else if (in_hardirq()) {
		lockdep_assert_irqs_disabled();

		bio_uninit(bio);
		bio->bi_next = cache->free_list_irq;
		cache->free_list_irq = bio;
		cache->nr_irq++;
	} else {
		goto out_free;
	}
	put_cpu();
	return;
out_free:
	put_cpu();
	bio_free(bio);
}

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
		BUG_ON(!atomic_read(&bio->__bi_cnt));
		if (!atomic_dec_and_test(&bio->__bi_cnt))
			return;
	}
	if (bio->bi_opf & REQ_ALLOC_CACHE)
		bio_put_percpu_cache(bio);
	else
		bio_free(bio);
}
EXPORT_SYMBOL(bio_put);

static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_iter = bio_src->bi_iter;

	if (bio->bi_bdev) {
		if (bio->bi_bdev == bio_src->bi_bdev &&
		    bio_flagged(bio_src, BIO_REMAPPED))
			bio_set_flag(bio, BIO_REMAPPED);
		bio_clone_blkg_association(bio, bio_src);
	}

	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, gfp) < 0)
		return -ENOMEM;
	return 0;
}

/**
 * bio_alloc_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 * @bs: bio_set to allocate from
 *
 * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
 * bio, but not the actual data it points to.
 *
 * The caller must ensure that the returned bio is not freed before @bio_src.
 */
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs)
{
	struct bio *bio;

	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
	if (!bio)
		return NULL;

	if (__bio_clone(bio, bio_src, gfp) < 0) {
		bio_put(bio);
		return NULL;
	}
	bio->bi_io_vec = bio_src->bi_io_vec;

	return bio;
}
EXPORT_SYMBOL(bio_alloc_clone);
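
/*
 * Illustrative sketch (not part of this file): a stacking driver remapping an
 * incoming bio to a lower device by cloning it; my_bioset, my_clone_endio and
 * the remapped sector are hypothetical:
 *
 *	struct bio *clone = bio_alloc_clone(lower_bdev, bio, GFP_NOIO, &my_bioset);
 *
 *	clone->bi_iter.bi_sector = remapped_sector;
 *	clone->bi_end_io = my_clone_endio;
 *	clone->bi_private = bio;
 *	submit_bio_noacct(clone);
 */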

/**
 * bio_init_clone - clone a bio that shares the original bio's biovec
 * @bdev: block_device to clone onto
 * @bio: bio to clone into
 * @bio_src: bio to clone from
 * @gfp: allocation priority
 *
 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
 * The caller owns the returned bio, but not the actual data it points to.
 *
 * The caller must ensure that @bio_src is not freed before @bio.
 */
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp)
{
	int ret;

	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
	ret = __bio_clone(bio, bio_src, gfp);
	if (ret)
		bio_uninit(bio);
	return ret;
}
EXPORT_SYMBOL(bio_init_clone);

/**
 * bio_full - check if the bio is full
 * @bio:	bio to check
 * @len:	length of one segment to be added
 *
 * Return true if @bio is full and one segment with @len bytes can't be
 * added to the bio, otherwise return false
 */
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
		unsigned int len, unsigned int off, bool *same_page)
{
	size_t bv_end = bv->bv_offset + bv->bv_len;
	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
	phys_addr_t page_addr = page_to_phys(page);

	if (vec_end_addr + 1 != page_addr + off)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
		return false;
	if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
		return false;

	*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
	if (!*same_page) {
		if (IS_ENABLED(CONFIG_KMSAN))
			return false;
		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
			return false;
	}

	bv->bv_len += len;
	return true;
}

/*
 * Try to merge a page into a segment, while obeying the hardware segment
 * size limit.  This is not for normal read/write bios, but for passthrough
 * or Zone Append operations that we can't split.
 */
bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset;
	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;

	if ((addr1 | mask) != (addr2 | mask))
		return false;
	if (len > queue_max_segment_size(q) - bv->bv_len)
		return false;
	return bvec_try_merge_page(bv, page, len, offset, same_page);
}

/**
 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 * @max_sectors: maximum number of sectors that can be added
 * @same_page: return if the segment has been merged inside the same page
 *
 * Add a page to a bio while respecting the hardware max_sectors, max_segment
 * and gap limitations.
 */
int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page)
{
	unsigned int max_size = max_sectors << SECTOR_SHIFT;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	len = min3(len, max_size, queue_max_segment_size(q));
	if (len > max_size - bio->bi_iter.bi_size)
		return 0;

	if (bio->bi_vcnt > 0) {
		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
				same_page)) {
			bio->bi_iter.bi_size += len;
			return len;
		}

		if (bio->bi_vcnt >=
		    min(bio->bi_max_vecs, queue_max_segments(q)))
			return 0;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(&q->limits, bv, offset))
			return 0;
	}

	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to passthrough bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by passthrough bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset)
{
	bool same_page = false;
	return bio_add_hw_page(q, bio, page, len, offset,
			queue_max_hw_sectors(q), &same_page);
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_zone_append_page - attempt to add page to zone-append bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
 * for a zone-append request. This can fail for a number of reasons, such as the
 * bio being full, the target block device not being zoned, or other limitations
 * of the target block device. The target block device must allow bio's up to
 * PAGE_SIZE, so it is always possible to add a single page to an empty bio.
 *
 * Returns: number of bytes added to the bio, or 0 in case of a failure.
 */
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	bool same_page = false;

	if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
		return 0;

	if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
		return 0;

	return bio_add_hw_page(q, bio, page, len, offset,
			       queue_max_zone_append_sectors(q), &same_page);
}
EXPORT_SYMBOL_GPL(bio_add_zone_append_page);

/**
 * __bio_add_page - add page(s) to a bio in a new segment
 * @bio: destination bio
 * @page: start page to add
 * @len: length of the data to add, may cross pages
 * @off: offset of the data relative to @page, may cross pages
 *
 * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
 * that @bio has space for another bvec.
 */
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	WARN_ON_ONCE(bio_full(bio, len));

	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
	bio->bi_iter.bi_size += len;
	bio->bi_vcnt++;
}
EXPORT_SYMBOL_GPL(__bio_add_page);

/**
 * bio_add_page - attempt to add page(s) to bio
 * @bio: destination bio
 * @page: start page to add
 * @len: vec entry length, may cross pages
 * @offset: vec entry offset relative to @page, may cross pages
 *
 * Attempt to add page(s) to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return 0;

	if (bio->bi_vcnt > 0 &&
	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
				page, len, offset, &same_page)) {
		bio->bi_iter.bi_size += len;
		return len;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;
	__bio_add_page(bio, page, len, offset);
	return len;
}
EXPORT_SYMBOL(bio_add_page);
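
/*
 * Illustrative sketch (not part of this file): filling a bio page by page and
 * starting a new one when bio_add_page() reports that the current bio is
 * full; next_sector and opf are hypothetical caller state:
 *
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
 *		submit_bio(bio);
 *		bio = bio_alloc(bdev, BIO_MAX_VECS, opf, GFP_NOIO);
 *		bio->bi_iter.bi_sector = next_sector;
 *		__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	}
 */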

void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
			  size_t off)
{
	WARN_ON_ONCE(len > UINT_MAX);
	WARN_ON_ONCE(off > UINT_MAX);
	__bio_add_page(bio, &folio->page, len, off);
}

/**
 * bio_add_folio - Attempt to add part of a folio to a bio.
 * @bio: BIO to add to.
 * @folio: Folio to add.
 * @len: How many bytes from the folio to add.
 * @off: First byte in this folio to add.
 *
 * Filesystems that use folios can call this function instead of calling
 * bio_add_page() for each page in the folio.  If @off is bigger than
 * PAGE_SIZE, this function can create a bio_vec that starts in a page
 * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
 *
 * Return: Whether the addition was successful.
 */
bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
		   size_t off)
{
	if (len > UINT_MAX || off > UINT_MAX)
		return false;
	return bio_add_page(bio, &folio->page, len, off) > 0;
}
EXPORT_SYMBOL(bio_add_folio);

void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct page *page;
		size_t done = 0;

		if (mark_dirty) {
			folio_lock(fi.folio);
			folio_mark_dirty(fi.folio);
			folio_unlock(fi.folio);
		}
		page = folio_page(fi.folio, fi.offset / PAGE_SIZE);
		do {
			bio_release_page(bio, page++);
			done += PAGE_SIZE;
		} while (done < fi.length);
	}
}
EXPORT_SYMBOL_GPL(__bio_release_pages);

void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
{
	size_t size = iov_iter_count(iter);

	WARN_ON_ONCE(bio->bi_max_vecs);

	if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		size_t max_sectors = queue_max_zone_append_sectors(q);

		size = min(size, max_sectors << SECTOR_SHIFT);
	}

	bio->bi_vcnt = iter->nr_segs;
	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
	bio->bi_iter.bi_bvec_done = iter->iov_offset;
	bio->bi_iter.bi_size = size;
	bio_set_flag(bio, BIO_CLONED);
}

static int bio_iov_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int offset)
{
	bool same_page = false;

	if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len))
		return -EIO;

	if (bio->bi_vcnt > 0 &&
	    bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
				page, len, offset, &same_page)) {
		bio->bi_iter.bi_size += len;
		if (same_page)
			bio_release_page(bio, page);
		return 0;
	}
	__bio_add_page(bio, page, len, offset);
	return 0;
}

static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	bool same_page = false;

	if (bio_add_hw_page(q, bio, page, len, offset,
			queue_max_zone_append_sectors(q), &same_page) != len)
		return -EINVAL;
	if (same_page)
		bio_release_page(bio, page);
	return 0;
}

#define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))

/**
 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Extracts pages from *iter and appends them to @bio's bvec array.  The pages
 * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag.
 * For a multi-segment *iter, this function only adds pages from the next
 * non-empty segment of the iov iterator.
 */
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	iov_iter_extraction_t extraction_flags = 0;
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	ssize_t size, left;
	unsigned len, i = 0;
	size_t offset;
	int ret = 0;

	/*
	 * Move page array up in the allocated memory for the bio vecs as far as
	 * possible so that we can start filling biovecs from the beginning
	 * without overwriting the temporary page array.
	 */
	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);

	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
		extraction_flags |= ITER_ALLOW_P2PDMA;

	/*
	 * Each segment in the iov is required to be a block size multiple.
	 * However, we may not be able to get the entire segment if it spans
	 * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the
	 * result to ensure the bio's total size is correct. The remainder of
	 * the iov data will be picked up in the next bio iteration.
	 */
	size = iov_iter_extract_pages(iter, &pages,
				      UINT_MAX - bio->bi_iter.bi_size,
				      nr_pages, extraction_flags, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;

	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);

	if (bio->bi_bdev) {
		size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
		iov_iter_revert(iter, trim);
		size -= trim;
	}

	if (unlikely(!size)) {
		ret = -EFAULT;
		goto out;
	}

	for (left = size, i = 0; left > 0; left -= len, i++) {
		struct page *page = pages[i];

		len = min_t(size_t, PAGE_SIZE - offset, left);
		if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
			ret = bio_iov_add_zone_append_page(bio, page, len,
					offset);
			if (ret)
				break;
		} else
			bio_iov_add_page(bio, page, len, offset);

		offset = 0;
	}

	iov_iter_revert(iter, left);
out:
	while (i < nr_pages)
		bio_release_page(bio, pages[i++]);

	return ret;
}

/**
 * bio_iov_iter_get_pages - add user or kernel pages to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be added
 *
 * This takes either an iterator pointing to user memory, or one pointing to
 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
 * map them into the kernel. On IO completion, the caller should put those
 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
 * to ensure the bvecs and pages stay referenced until the submitted I/O is
 * completed by a call to ->ki_complete() or returns with an error other than
 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
 * on IO completion. If it isn't, then pages should be released.
 *
 * The function tries, but does not guarantee, to pin as many pages as
 * fit into the bio, or are requested in @iter, whatever is smaller. If
 * MM encounters an error pinning the requested pages, it stops. Error
 * is returned only if 0 pages could be pinned.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	int ret = 0;

	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return -EIO;

	if (iov_iter_is_bvec(iter)) {
		bio_iov_bvec_set(bio, iter);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
		return 0;
	}

	if (iov_iter_extract_will_pin(iter))
		bio_set_flag(bio, BIO_PAGE_PINNED);
	do {
		ret = __bio_iov_iter_get_pages(bio, iter);
	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));

	return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
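
/*
 * Illustrative sketch (not part of this file): a direct-IO style caller
 * filling and submitting bios until the iterator is drained; pos and opf are
 * hypothetical caller state, and completion handling is omitted:
 *
 *	while (iov_iter_count(iter)) {
 *		struct bio *bio = bio_alloc(bdev, BIO_MAX_VECS, opf, GFP_KERNEL);
 *
 *		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
 *		ret = bio_iov_iter_get_pages(bio, iter);
 *		if (ret) {
 *			bio_put(bio);
 *			break;
 *		}
 *		pos += bio->bi_iter.bi_size;
 *		submit_bio(bio);
 *	}
 */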

static void submit_bio_wait_endio(struct bio *bio)
{
	complete(bio->bi_private);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 *
 * WARNING: Unlike how submit_bio() is usually used, this function does not
 * consume the bio reference. The caller must drop the reference on their own.
 */
int submit_bio_wait(struct bio *bio)
{
	DECLARE_COMPLETION_ONSTACK_MAP(done,
			bio->bi_bdev->bd_disk->lockdep_map);

	bio->bi_private = &done;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	blk_wait_io(&done);

	return blk_status_to_errno(bio->bi_status);
}
EXPORT_SYMBOL(submit_bio_wait);
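
/*
 * Illustrative sketch (not part of this file): reading a single page
 * synchronously with submit_bio_wait(); sector and page are hypothetical:
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	int ret;
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);	// submit_bio_wait() does not drop the reference
 */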
1383 | ||
d4aa57a1 | 1384 | void __bio_advance(struct bio *bio, unsigned bytes) |
054bdf64 KO |
1385 | { |
1386 | if (bio_integrity(bio)) | |
1387 | bio_integrity_advance(bio, bytes); | |
1388 | ||
a892c8d5 | 1389 | bio_crypt_advance(bio, bytes); |
4550dd6c | 1390 | bio_advance_iter(bio, &bio->bi_iter, bytes); |
054bdf64 | 1391 | } |
d4aa57a1 | 1392 | EXPORT_SYMBOL(__bio_advance); |
054bdf64 | 1393 | |
ee4b4e22 JA |
1394 | void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, |
1395 | struct bio *src, struct bvec_iter *src_iter) | |
1396 | { | |
1397 | while (src_iter->bi_size && dst_iter->bi_size) { | |
1398 | struct bio_vec src_bv = bio_iter_iovec(src, *src_iter); | |
1399 | struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter); | |
1400 | unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); | |
1401 | void *src_buf = bvec_kmap_local(&src_bv); | |
1402 | void *dst_buf = bvec_kmap_local(&dst_bv); | |
1403 | ||
1404 | memcpy(dst_buf, src_buf, bytes); | |
1405 | ||
1406 | kunmap_local(dst_buf); | |
1407 | kunmap_local(src_buf); | |
1408 | ||
1409 | bio_advance_iter_single(src, src_iter, bytes); | |
1410 | bio_advance_iter_single(dst, dst_iter, bytes); | |
1411 | } | |
1412 | } | |
1413 | EXPORT_SYMBOL(bio_copy_data_iter); | |
1414 | ||
38a72dac | 1415 | /** |
45db54d5 KO |
1416 | * bio_copy_data - copy contents of data buffers from one bio to another |
1417 | * @src: source bio | |
1418 | * @dst: destination bio | |
38a72dac KO |
1419 | * |
1420 | * Stops when it reaches the end of either @src or @dst - that is, copies | |
1421 | * min(src->bi_iter.bi_size, dst->bi_iter.bi_size) bytes.
1422 | */ | |
1423 | void bio_copy_data(struct bio *dst, struct bio *src) | |
1424 | { | |
45db54d5 KO |
1425 | struct bvec_iter src_iter = src->bi_iter; |
1426 | struct bvec_iter dst_iter = dst->bi_iter; | |
1427 | ||
ee4b4e22 | 1428 | bio_copy_data_iter(dst, &dst_iter, src, &src_iter); |
38a72dac | 1429 | } |
16ac3d63 KO |
1430 | EXPORT_SYMBOL(bio_copy_data); |
1431 | ||
491221f8 | 1432 | void bio_free_pages(struct bio *bio) |
1dfa0f68 CH |
1433 | { |
1434 | struct bio_vec *bvec; | |
6dc4f100 | 1435 | struct bvec_iter_all iter_all; |
1dfa0f68 | 1436 | |
2b070cfe | 1437 | bio_for_each_segment_all(bvec, bio, iter_all) |
1dfa0f68 CH |
1438 | __free_page(bvec->bv_page); |
1439 | } | |
491221f8 | 1440 | EXPORT_SYMBOL(bio_free_pages); |
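
/*
 * Illustrative sketch, not part of this file: making a private copy of a
 * bio's payload (e.g. a bounce buffer) with bio_copy_data(), to be released
 * later with bio_free_pages().  my_copy_bio_data() is a hypothetical name
 * and assumes the payload fits into BIO_MAX_VECS pages.
 */
static struct bio *my_copy_bio_data(struct bio *src, struct bio_set *bs)
{
	unsigned int nr_pages = DIV_ROUND_UP(src->bi_iter.bi_size, PAGE_SIZE);
	struct bio *copy;
	unsigned int i;

	copy = bio_alloc_bioset(src->bi_bdev, nr_pages, bio_op(src),
				GFP_NOIO, bs);
	if (!copy)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = alloc_page(GFP_NOIO);

		if (!page)
			goto free_pages;
		__bio_add_page(copy, page, PAGE_SIZE, 0);
	}

	bio_copy_data(copy, src);	/* copies min(src, copy) bytes */
	return copy;

free_pages:
	bio_free_pages(copy);
	bio_put(copy);
	return NULL;
}
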
1dfa0f68 | 1441 | |
1da177e4 LT |
1442 | /* |
1443 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions | |
1444 | * for performing direct-IO in BIOs. | |
1445 | * | |
1b151e24 | 1446 | * The problem is that we cannot run folio_mark_dirty() from interrupt context |
1da177e4 LT |
1447 | * because the required locks are not interrupt-safe. So what we can do is to |
1448 | * mark the pages dirty _before_ performing IO. And in interrupt context, | |
1449 | * check that the pages are still dirty. If so, fine. If not, redirty them | |
1450 | * in process context. | |
1451 | * | |
1da177e4 LT |
1452 | * Note that this code is very hard to test under normal circumstances because |
1453 | * direct-io pins the pages with get_user_pages(). This makes | |
1454 | * is_page_cache_freeable() return false, and the VM will not clean the pages.
0d5c3eba | 1455 | * But other code (e.g., flusher threads) could clean the pages if they are
1da177e4 LT |
1456 | * mapped into the pagecache.
1457 | * | |
1458 | * Simply disabling the call to bio_set_pages_dirty() is a good way to test the | |
1459 | * deferred bio dirtying paths. | |
1460 | */ | |
1461 | ||
1462 | /* | |
1463 | * bio_set_pages_dirty() will mark all the bio's pages as dirty. | |
1464 | */ | |
1465 | void bio_set_pages_dirty(struct bio *bio) | |
1466 | { | |
1b151e24 | 1467 | struct folio_iter fi; |
1da177e4 | 1468 | |
1b151e24 MWO |
1469 | bio_for_each_folio_all(fi, bio) { |
1470 | folio_lock(fi.folio); | |
1471 | folio_mark_dirty(fi.folio); | |
1472 | folio_unlock(fi.folio); | |
1da177e4 LT |
1473 | } |
1474 | } | |
7ba37927 | 1475 | EXPORT_SYMBOL_GPL(bio_set_pages_dirty); |
1da177e4 | 1476 | |
1da177e4 LT |
1477 | /* |
1478 | * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. | |
1479 | * If they are, then fine. If, however, some pages are clean then they must | |
1480 | * have been written out during the direct-IO read. So we take another ref on | |
24d5493f | 1481 | * the BIO and re-dirty the pages in process context. |
1da177e4 LT |
1482 | * |
1483 | * It is expected that bio_check_pages_dirty() will wholly own the BIO from | |
fd363244 DH |
1484 | * here on. It will unpin each page and will run one bio_put() against the |
1485 | * BIO. | |
1da177e4 LT |
1486 | */ |
1487 | ||
65f27f38 | 1488 | static void bio_dirty_fn(struct work_struct *work); |
1da177e4 | 1489 | |
65f27f38 | 1490 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); |
1da177e4 LT |
1491 | static DEFINE_SPINLOCK(bio_dirty_lock); |
1492 | static struct bio *bio_dirty_list; | |
1493 | ||
1494 | /* | |
1495 | * This runs in process context | |
1496 | */ | |
65f27f38 | 1497 | static void bio_dirty_fn(struct work_struct *work) |
1da177e4 | 1498 | { |
24d5493f | 1499 | struct bio *bio, *next; |
1da177e4 | 1500 | |
24d5493f CH |
1501 | spin_lock_irq(&bio_dirty_lock); |
1502 | next = bio_dirty_list; | |
1da177e4 | 1503 | bio_dirty_list = NULL; |
24d5493f | 1504 | spin_unlock_irq(&bio_dirty_lock); |
1da177e4 | 1505 | |
24d5493f CH |
1506 | while ((bio = next) != NULL) { |
1507 | next = bio->bi_private; | |
1da177e4 | 1508 | |
d241a95f | 1509 | bio_release_pages(bio, true); |
1da177e4 | 1510 | bio_put(bio); |
1da177e4 LT |
1511 | } |
1512 | } | |
1513 | ||
1514 | void bio_check_pages_dirty(struct bio *bio) | |
1515 | { | |
1b151e24 | 1516 | struct folio_iter fi; |
24d5493f | 1517 | unsigned long flags; |
1da177e4 | 1518 | |
1b151e24 MWO |
1519 | bio_for_each_folio_all(fi, bio) { |
1520 | if (!folio_test_dirty(fi.folio)) | |
24d5493f | 1521 | goto defer; |
1da177e4 LT |
1522 | } |
1523 | ||
d241a95f | 1524 | bio_release_pages(bio, false); |
24d5493f CH |
1525 | bio_put(bio); |
1526 | return; | |
1527 | defer: | |
1528 | spin_lock_irqsave(&bio_dirty_lock, flags); | |
1529 | bio->bi_private = bio_dirty_list; | |
1530 | bio_dirty_list = bio; | |
1531 | spin_unlock_irqrestore(&bio_dirty_lock, flags); | |
1532 | schedule_work(&bio_dirty_work); | |
1da177e4 | 1533 | } |
7ba37927 | 1534 | EXPORT_SYMBOL_GPL(bio_check_pages_dirty); |
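
/*
 * Illustrative sketch, not part of this file: how a direct-I/O read path
 * typically uses the two helpers above.  The pages are dirtied up front in
 * process context; the completion handler re-checks them and defers any
 * re-dirtying to the workqueue.  my_*() names are hypothetical and the
 * should_dirty logic of e.g. blkdev_direct_IO() is omitted.
 */
static void my_dio_read_endio(struct bio *bio)
{
	/* Unpins the pages and drops the bio reference in all cases. */
	bio_check_pages_dirty(bio);
}

static void my_submit_dio_read(struct bio *bio)
{
	bio->bi_end_io = my_dio_read_endio;
	bio_set_pages_dirty(bio);	/* must run in process context */
	submit_bio(bio);
}
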
1da177e4 | 1535 | |
c4cf5261 JA |
1536 | static inline bool bio_remaining_done(struct bio *bio) |
1537 | { | |
1538 | /* | |
1539 | * If we're not chaining, then ->__bi_remaining is always 1 and | |
1540 | * we always end io on the first invocation. | |
1541 | */ | |
1542 | if (!bio_flagged(bio, BIO_CHAIN)) | |
1543 | return true; | |
1544 | ||
1545 | BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); | |
1546 | ||
326e1dbb | 1547 | if (atomic_dec_and_test(&bio->__bi_remaining)) { |
b7c44ed9 | 1548 | bio_clear_flag(bio, BIO_CHAIN); |
c4cf5261 | 1549 | return true; |
326e1dbb | 1550 | } |
c4cf5261 JA |
1551 | |
1552 | return false; | |
1553 | } | |
1554 | ||
1da177e4 LT |
1555 | /** |
1556 | * bio_endio - end I/O on a bio | |
1557 | * @bio: bio | |
1da177e4 LT |
1558 | * |
1559 | * Description: | |
4246a0b6 CH |
1560 | * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred |
1561 | * way to end I/O on a bio. No one should call bi_end_io() directly on a | |
1562 | * bio unless they own it and thus know that it has an end_io function. | |
fbbaf700 N |
1563 | * |
1564 | * bio_endio() can be called several times on a bio that has been chained | |
1565 | * using bio_chain(). The ->bi_end_io() function will only be called the | |
60b6a7e6 | 1566 | * last time. |
1da177e4 | 1567 | **/ |
4246a0b6 | 1568 | void bio_endio(struct bio *bio) |
1da177e4 | 1569 | { |
ba8c6967 | 1570 | again: |
2b885517 | 1571 | if (!bio_remaining_done(bio)) |
ba8c6967 | 1572 | return; |
7c20f116 CH |
1573 | if (!bio_integrity_endio(bio)) |
1574 | return; | |
1da177e4 | 1575 | |
aa1b46dc | 1576 | rq_qos_done_bio(bio); |
67b42d0b | 1577 | |
60b6a7e6 | 1578 | if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { |
3caee463 | 1579 | trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); |
60b6a7e6 EH |
1580 | bio_clear_flag(bio, BIO_TRACE_COMPLETION); |
1581 | } | |
1582 | ||
ba8c6967 CH |
1583 | /* |
1584 | * Need to have a real endio function for chained bios, otherwise | |
1585 | * various corner cases will break (like stacking block devices that | |
1586 | * save/restore bi_end_io) - however, we want to avoid unbounded | |
1587 | * recursion and blowing the stack. Tail call optimization would | |
1588 | * handle this, but compiling with frame pointers also disables | |
1589 | * gcc's sibling call optimization. | |
1590 | */ | |
1591 | if (bio->bi_end_io == bio_chain_endio) { | |
1592 | bio = __bio_chain_endio(bio); | |
1593 | goto again; | |
196d38bc | 1594 | } |
ba8c6967 | 1595 | |
9e234eea | 1596 | blk_throtl_bio_endio(bio); |
b222dd2f SL |
1597 | /* release cgroup info */ |
1598 | bio_uninit(bio); | |
ba8c6967 CH |
1599 | if (bio->bi_end_io) |
1600 | bio->bi_end_io(bio); | |
1da177e4 | 1601 | } |
a112a71d | 1602 | EXPORT_SYMBOL(bio_endio); |
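
/*
 * Illustrative sketch, not part of this file: a typical ->bi_end_io handler
 * that bio_endio() will invoke once the bio (and anything chained to it)
 * has completed.  struct my_ctx and the my_*() names are hypothetical.
 */
struct my_ctx {
	struct completion done;
};

static void my_end_io(struct bio *bio)
{
	struct my_ctx *ctx = bio->bi_private;

	if (bio->bi_status)
		pr_err("my: I/O failed: %d\n",
		       blk_status_to_errno(bio->bi_status));
	complete(&ctx->done);
	bio_put(bio);		/* drop the submitter's reference */
}
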
1da177e4 | 1603 | |
20d0189b KO |
1604 | /** |
1605 | * bio_split - split a bio | |
1606 | * @bio: bio to split | |
1607 | * @sectors: number of sectors to split from the front of @bio | |
1608 | * @gfp: gfp mask | |
1609 | * @bs: bio set to allocate from | |
1610 | * | |
1611 | * Allocates and returns a new bio which represents @sectors from the start of | |
1612 | * @bio, and updates @bio to represent the remaining sectors. | |
1613 | * | |
f3f5da62 | 1614 | * Unless this is a discard request, the newly allocated bio will point
dad77584 BVA |
1615 | * to @bio's bi_io_vec. It is the caller's responsibility to ensure that |
1616 | * neither @bio nor @bs is freed before the split bio.
20d0189b KO |
1617 | */ |
1618 | struct bio *bio_split(struct bio *bio, int sectors, | |
1619 | gfp_t gfp, struct bio_set *bs) | |
1620 | { | |
f341a4d3 | 1621 | struct bio *split; |
20d0189b KO |
1622 | |
1623 | BUG_ON(sectors <= 0); | |
1624 | BUG_ON(sectors >= bio_sectors(bio)); | |
1625 | ||
0512a75b KB |
1626 | /* Zone append commands cannot be split */ |
1627 | if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) | |
1628 | return NULL; | |
1629 | ||
abfc426d | 1630 | split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); |
20d0189b KO |
1631 | if (!split) |
1632 | return NULL; | |
1633 | ||
1634 | split->bi_iter.bi_size = sectors << 9; | |
1635 | ||
1636 | if (bio_integrity(split)) | |
fbd08e76 | 1637 | bio_integrity_trim(split); |
20d0189b KO |
1638 | |
1639 | bio_advance(bio, split->bi_iter.bi_size); | |
1640 | ||
fbbaf700 | 1641 | if (bio_flagged(bio, BIO_TRACE_COMPLETION)) |
20d59023 | 1642 | bio_set_flag(split, BIO_TRACE_COMPLETION); |
fbbaf700 | 1643 | |
20d0189b KO |
1644 | return split; |
1645 | } | |
1646 | EXPORT_SYMBOL(bio_split); | |
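
/*
 * Illustrative sketch, not part of this file: the split-and-resubmit pattern
 * used by stacking drivers.  The front @sectors are returned for immediate
 * processing; the remainder of @bio is chained (so its completion waits for
 * the split part) and resubmitted.  my_split_front() is a hypothetical name.
 */
static struct bio *my_split_front(struct bio *bio, unsigned int sectors,
				  struct bio_set *bs)
{
	struct bio *split;

	if (sectors >= bio_sectors(bio))
		return bio;			/* nothing to split off */

	split = bio_split(bio, sectors, GFP_NOIO, bs);
	if (!split)
		return NULL;
	bio_chain(split, bio);			/* @bio completes after @split */
	submit_bio_noacct(bio);			/* requeue the remainder */
	return split;
}
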
1647 | ||
6678d83f KO |
1648 | /** |
1649 | * bio_trim - trim a bio | |
1650 | * @bio: bio to trim | |
1651 | * @offset: number of sectors to trim from the front of @bio | |
1652 | * @size: size we want to trim @bio to, in sectors | |
e83502ca CK |
1653 | * |
1654 | * This function is typically used for bios that are cloned and submitted | |
1655 | * to the underlying device in parts. | |
6678d83f | 1656 | */ |
e83502ca | 1657 | void bio_trim(struct bio *bio, sector_t offset, sector_t size) |
6678d83f | 1658 | { |
e83502ca | 1659 | if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS || |
8535c018 | 1660 | offset + size > bio_sectors(bio))) |
e83502ca | 1661 | return; |
6678d83f KO |
1662 | |
1663 | size <<= 9; | |
4f024f37 | 1664 | if (offset == 0 && size == bio->bi_iter.bi_size) |
6678d83f KO |
1665 | return; |
1666 | ||
6678d83f | 1667 | bio_advance(bio, offset << 9); |
4f024f37 | 1668 | bio->bi_iter.bi_size = size; |
376a78ab DM |
1669 | |
1670 | if (bio_integrity(bio)) | |
fbd08e76 | 1671 | bio_integrity_trim(bio); |
6678d83f KO |
1672 | } |
1673 | EXPORT_SYMBOL_GPL(bio_trim); | |
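
/*
 * Illustrative sketch, not part of this file: cloning a bio and trimming the
 * clone to a slice of the parent, as a stacking driver might do when mapping
 * parts of a bio to different targets.  my_clone_slice() is a hypothetical
 * name; @offset and @nr_sects are in sectors, relative to @bio's start.
 */
static struct bio *my_clone_slice(struct bio *bio, sector_t offset,
				  sector_t nr_sects, struct bio_set *bs)
{
	struct bio *clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, bs);

	if (!clone)
		return NULL;
	bio_trim(clone, offset, nr_sects);	/* keep @nr_sects from @offset */
	return clone;
}
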
1674 | ||
1da177e4 LT |
1675 | /* |
1676 | * create memory pools for biovec's in a bio_set. | |
1677 | * use the global biovec slabs created for general use. | |
1678 | */ | |
8aa6ba2f | 1679 | int biovec_init_pool(mempool_t *pool, int pool_entries) |
1da177e4 | 1680 | { |
7a800a20 | 1681 | struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1; |
1da177e4 | 1682 | |
8aa6ba2f | 1683 | return mempool_init_slab_pool(pool, pool_entries, bp->slab); |
1da177e4 LT |
1684 | } |
1685 | ||
917a38c7 KO |
1686 | /* |
1687 | * bioset_exit - exit a bioset initialized with bioset_init() | |
1688 | * | |
1689 | * May be called on a zeroed but uninitialized bioset (i.e. allocated with | |
1690 | * kzalloc()). | |
1691 | */ | |
1692 | void bioset_exit(struct bio_set *bs) | |
1da177e4 | 1693 | { |
be4d234d | 1694 | bio_alloc_cache_destroy(bs); |
df2cb6da KO |
1695 | if (bs->rescue_workqueue) |
1696 | destroy_workqueue(bs->rescue_workqueue); | |
917a38c7 | 1697 | bs->rescue_workqueue = NULL; |
df2cb6da | 1698 | |
8aa6ba2f KO |
1699 | mempool_exit(&bs->bio_pool); |
1700 | mempool_exit(&bs->bvec_pool); | |
9f060e22 | 1701 | |
7878cba9 | 1702 | bioset_integrity_free(bs); |
917a38c7 KO |
1703 | if (bs->bio_slab) |
1704 | bio_put_slab(bs); | |
1705 | bs->bio_slab = NULL; | |
1706 | } | |
1707 | EXPORT_SYMBOL(bioset_exit); | |
1da177e4 | 1708 | |
917a38c7 KO |
1709 | /** |
1710 | * bioset_init - Initialize a bio_set | |
dad08527 | 1711 | * @bs: pool to initialize |
917a38c7 KO |
1712 | * @pool_size: Number of bio and bio_vecs to cache in the mempool |
1713 | * @front_pad: Number of bytes to allocate in front of the returned bio | |
1714 | * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS | |
1715 | * and %BIOSET_NEED_RESCUER | |
1716 | * | |
dad08527 KO |
1717 | * Description: |
1718 | * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller | |
1719 | * to ask for a number of bytes to be allocated in front of the bio. | |
1720 | * Front pad allocation is useful for embedding the bio inside | |
1721 | * another structure, to avoid allocating extra data to go with the bio. | |
1722 | * Note that the bio must always be embedded at the END of that structure,
1723 | * or things will break badly. | |
1724 | * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated | |
abfc426d CH |
1725 | * for allocating iovecs. This pool is not needed e.g. for bio_init_clone(). |
1726 | * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used | |
1727 | * to dispatch queued requests when the mempool runs out of space. | |
dad08527 | 1728 | * |
917a38c7 KO |
1729 | */ |
1730 | int bioset_init(struct bio_set *bs, | |
1731 | unsigned int pool_size, | |
1732 | unsigned int front_pad, | |
1733 | int flags) | |
1734 | { | |
917a38c7 | 1735 | bs->front_pad = front_pad; |
9f180e31 ML |
1736 | if (flags & BIOSET_NEED_BVECS) |
1737 | bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); | |
1738 | else | |
1739 | bs->back_pad = 0; | |
917a38c7 KO |
1740 | |
1741 | spin_lock_init(&bs->rescue_lock); | |
1742 | bio_list_init(&bs->rescue_list); | |
1743 | INIT_WORK(&bs->rescue_work, bio_alloc_rescue); | |
1744 | ||
49d1ec85 | 1745 | bs->bio_slab = bio_find_or_create_slab(bs); |
917a38c7 KO |
1746 | if (!bs->bio_slab) |
1747 | return -ENOMEM; | |
1748 | ||
1749 | if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) | |
1750 | goto bad; | |
1751 | ||
1752 | if ((flags & BIOSET_NEED_BVECS) && | |
1753 | biovec_init_pool(&bs->bvec_pool, pool_size)) | |
1754 | goto bad; | |
1755 | ||
be4d234d JA |
1756 | if (flags & BIOSET_NEED_RESCUER) { |
1757 | bs->rescue_workqueue = alloc_workqueue("bioset", | |
1758 | WQ_MEM_RECLAIM, 0); | |
1759 | if (!bs->rescue_workqueue) | |
1760 | goto bad; | |
1761 | } | |
1762 | if (flags & BIOSET_PERCPU_CACHE) { | |
1763 | bs->cache = alloc_percpu(struct bio_alloc_cache); | |
1764 | if (!bs->cache) | |
1765 | goto bad; | |
1766 | cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); | |
1767 | } | |
917a38c7 KO |
1768 | |
1769 | return 0; | |
1770 | bad: | |
1771 | bioset_exit(bs); | |
1772 | return -ENOMEM; | |
1773 | } | |
1774 | EXPORT_SYMBOL(bioset_init); | |
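
/*
 * Illustrative sketch, not part of this file: using @front_pad to embed a
 * bio at the end of a driver-private structure.  struct my_request,
 * my_bio_set and my_driver_init() are hypothetical names.
 */
struct my_request {
	int my_private;
	struct bio bio;			/* must be the last member */
};

static struct bio_set my_bio_set;

static int my_driver_init(void)
{
	/*
	 * front_pad = offsetof(struct my_request, bio) makes
	 * container_of(bio, struct my_request, bio) valid for every bio
	 * allocated from this set.
	 */
	return bioset_init(&my_bio_set, BIO_POOL_SIZE,
			   offsetof(struct my_request, bio),
			   BIOSET_NEED_BVECS);
}
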
1775 | ||
de76fd89 | 1776 | static int __init init_bio(void) |
1da177e4 LT |
1777 | { |
1778 | int i; | |
1779 | ||
a3df2e45 JA |
1780 | BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags)); |
1781 | ||
7878cba9 | 1782 | bio_integrity_init(); |
1da177e4 | 1783 | |
de76fd89 CH |
1784 | for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) { |
1785 | struct biovec_slab *bvs = bvec_slabs + i; | |
a7fcd37c | 1786 | |
de76fd89 CH |
1787 | bvs->slab = kmem_cache_create(bvs->name, |
1788 | bvs->nr_vecs * sizeof(struct bio_vec), 0, | |
1789 | SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | |
1da177e4 | 1790 | } |
1da177e4 | 1791 | |
be4d234d JA |
1792 | cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL, |
1793 | bio_cpu_dead); | |
1794 | ||
12c5b70c JA |
1795 | if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, |
1796 | BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE)) | |
1da177e4 LT |
1797 | panic("bio: can't allocate bios\n"); |
1798 | ||
f4f8154a | 1799 | if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE)) |
a91a2785 MP |
1800 | panic("bio: can't create integrity pool\n"); |
1801 | ||
1da177e4 LT |
1802 | return 0; |
1803 | } | |
1da177e4 | 1804 | subsys_initcall(init_bio); |