/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U

struct queue_limits;

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
        return min(nr_segs, BIO_MAX_VECS);
}
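
/*
 * Example (illustrative, not part of the original header): clamp a
 * caller-supplied page count so a bio allocation never asks for more
 * than BIO_MAX_VECS (256) vectors:
 *
 *	nr_vecs = bio_max_segs(npages);
 *	bio = bio_alloc(bdev, nr_vecs, REQ_OP_READ, GFP_KERNEL);
 */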

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
        bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
        bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
        bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
        bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
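
/*
 * Worked example (illustrative): with bi_iter.bi_sector == 8 and
 * bi_iter.bi_size == 4096, bio_sectors() is 4096 >> 9 == 8 and
 * bio_end_sector() is 8 + 8 == 16. Sector units here are always
 * 512 bytes, independent of the device's logical block size.
 */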

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
        (op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
        if (bio &&
            bio->bi_iter.bi_size &&
            bio_op(bio) != REQ_OP_DISCARD &&
            bio_op(bio) != REQ_OP_SECURE_ERASE &&
            bio_op(bio) != REQ_OP_WRITE_ZEROES)
                return true;

        return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
        return bio_op(bio) == REQ_OP_DISCARD ||
               bio_op(bio) == REQ_OP_SECURE_ERASE ||
               bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
        if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);

        return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
                                    struct bvec_iter_all *iter)
{
        if (iter->idx >= bio->bi_vcnt)
                return false;

        bvec_advance(&bio->bi_io_vec[iter->idx], iter);
        return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
        for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

static inline void bio_advance_iter(const struct bio *bio,
                                    struct bvec_iter *iter, unsigned int bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance(bio->bi_io_vec, iter, bytes);
                /* TODO: It is reasonable to complete bio with error here. */
}

/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
                                           struct bvec_iter *iter,
                                           unsigned int bytes)
{
        iter->bi_sector += bytes >> 9;

        if (bio_no_advance_iter(bio))
                iter->bi_size -= bytes;
        else
                bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @nbytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
        if (nbytes == bio->bi_iter.bi_size) {
                bio->bi_iter.bi_size = 0;
                return;
        }
        __bio_advance(bio, nbytes);
}
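
/*
 * Example (illustrative): a driver that has transferred the first 1024
 * bytes of a bio can call
 *
 *	bio_advance(bio, 1024);
 *
 * after which bi_iter covers only the remaining bytes, and iterating the
 * bio starts at the first untransferred byte.
 */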

#define __bio_for_each_segment(bvl, bio, iter, start)			\
        for (iter = (start);						\
             (iter).bi_size &&						\
                ((bvl = bio_iter_iovec((bio), (iter))), 1);		\
             bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
        __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
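
/*
 * Example usage (illustrative sketch): walk every single-page segment of
 * a bio, e.g. to checksum its payload. bvec_kmap_local() handles highmem
 * pages; 'crc' is assumed to be initialised by the caller.
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bv, bio, iter) {
 *		char *p = bvec_kmap_local(&bv);
 *
 *		crc = crc32(crc, p, bv.bv_len);
 *		kunmap_local(p);
 *	}
 */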

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
        for (iter = (start);						\
             (iter).bi_size &&						\
                ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
             bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
        __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the
 * same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
        for (i = 0, bvl = bio_first_bvec_all(bio);	\
             i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
        unsigned segs = 0;
        struct bio_vec bv;
        struct bvec_iter iter;

        /*
         * We special case discard/secure erase/write zeroes, because they
         * interpret bi_size differently:
         */

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        default:
                break;
        }

        bio_for_each_segment(bv, bio, iter)
                segs++;

        return segs;
}

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could complete before submit_bio returns,
 * and the bio would already be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
        bio->bi_flags |= (1 << BIO_REFFED);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
        if (count != 1) {
                bio->bi_flags |= (1 << BIO_REFFED);
                smp_mb();
        }
        atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
        return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
        return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
        WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
        return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating.  NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	boundary).
 */
struct folio_iter {
        struct folio *folio;
        size_t offset;
        size_t length;
        /* private: for use by the iterator */
        struct folio *_next;
        size_t _seg_count;
        int _i;
};

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
                                   int i)
{
        struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

        fi->folio = page_folio(bvec->bv_page);
        fi->offset = bvec->bv_offset +
                        PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
        fi->_seg_count = bvec->bv_len;
        fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
        fi->_next = folio_next(fi->folio);
        fi->_i = i;
}

static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
        fi->_seg_count -= fi->length;
        if (fi->_seg_count) {
                fi->folio = fi->_next;
                fi->offset = 0;
                fi->length = min(folio_size(fi->folio), fi->_seg_count);
                fi->_next = folio_next(fi->folio);
        } else if (fi->_i + 1 < bio->bi_vcnt) {
                bio_first_folio(fi, bio, fi->_i + 1);
        } else {
                fi->folio = NULL;
        }
}

/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)				\
        for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
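
/*
 * Example usage (illustrative sketch): mark every folio of a completed
 * read bio up to date, as a filesystem read completion might do.
 *
 *	struct folio_iter fi;
 *
 *	bio_for_each_folio_all(fi, bio)
 *		folio_mark_uptodate(fi.folio);
 */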

enum bip_flags {
        BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
        BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
        BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
        BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
        BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
        struct bio		*bip_bio;	/* parent bio */

        struct bvec_iter	bip_iter;

        unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
        unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
        unsigned short		bip_flags;	/* control flags */

        struct bvec_iter	bio_iter;	/* for rewinding parent bio */

        struct work_struct	bip_work;	/* I/O completion */

        struct bio_vec		*bip_vec;
        struct bio_vec		bip_inline_vecs[];/* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
        if (bio->bi_opf & REQ_INTEGRITY)
                return bio->bi_integrity;

        return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);

        if (bip)
                return bip->bip_flags & flag;

        return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
        return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
                                sector_t seed)
{
        bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
                             gfp_t gfp, struct bio_set *bs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
                unsigned *segs, struct bio_set *bs, unsigned max_bytes);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
                                         gfp_t gfp, struct bio_set *bs)
{
        if (sectors >= bio_sectors(bio))
                return bio;

        return bio_split(bio, sectors, gfp, bs);
}
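
/*
 * Example usage (illustrative sketch; handle_one_chunk() is a hypothetical
 * helper): process a bio in chunks of at most 'max_sectors'.
 * bio_next_split() returns @bio itself for the final chunk, which ends
 * the loop.
 *
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
 *		handle_one_chunk(split);
 *	} while (split != bio);
 */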

enum {
        BIOSET_NEED_BVECS = BIT(0),
        BIOSET_NEED_RESCUER = BIT(1),
        BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);

struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
                             blk_opf_t opf, gfp_t gfp_mask,
                             struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
                gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
                struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(struct block_device *bdev,
                unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
        return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
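
/*
 * Example usage (illustrative sketch): read one page synchronously from
 * 'sector' of 'bdev'; error handling is trimmed.
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */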

void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
        bio->bi_status = BLK_STS_IOERR;
        bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
        bio_set_flag(bio, BIO_QUIET);
        bio->bi_status = BLK_STS_AGAIN;
        bio_endio(bio);
}

/*
 * Calculate the number of bvec segments that should be allocated to fit the
 * data pointed to by @iter. If @iter is bvec-backed, its bvec is going to be
 * reused instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
        if (iov_iter_is_bvec(iter))
                return 0;
        return iov_iter_npages(iter, max_segs);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
              unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);
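
/*
 * Example usage (illustrative sketch): chain a split-off front piece to
 * its parent so the parent's completion waits for the child's.
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *	bio_chain(split, bio);
 *	submit_bio(split);
 *	(then continue processing the remainder held in 'bio')
 */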

int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
                             unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
                unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
                               struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
        if (!bio_flagged(bio, BIO_NO_PAGE_REF))
                __bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
        disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
                                 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
void blkcg_punt_bio_submit(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
                                               struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
                                              struct bio *src) { }
static inline void blkcg_punt_bio_submit(struct bio *bio)
{
        submit_bio(bio);
}
#endif	/* CONFIG_BLK_CGROUP */

static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
        bio_clear_flag(bio, BIO_REMAPPED);
        if (bio->bi_bdev != bdev)
                bio_clear_flag(bio, BIO_BPS_THROTTLED);
        bio->bi_bdev = bdev;
        bio_associate_blkg(bio);
}

/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
        struct bio *head;
        struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
        return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
        bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
        unsigned sz = 0;
        struct bio *bio;

        bio_list_for_each(bio, bl)
                sz++;

        return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;

        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;

        bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = bl->head;

        bl->head = bio;

        if (!bl->tail)
                bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->tail)
                bl->tail->bi_next = bl2->head;
        else
                bl->head = bl2->head;

        bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
                                       struct bio_list *bl2)
{
        if (!bl2->head)
                return;

        if (bl->head)
                bl2->tail->bi_next = bl->head;
        else
                bl->tail = bl2->tail;

        bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
        return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bl->head->bi_next;
                if (!bl->head)
                        bl->tail = NULL;

                bio->bi_next = NULL;
        }

        return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        bl->head = bl->tail = NULL;

        return bio;
}
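
/*
 * Example usage (illustrative sketch; 'some_bio' stands in for a real bio):
 * park bios on a private list and drain them in FIFO order later, as
 * remapping drivers commonly do.
 *
 *	struct bio_list deferred = BIO_EMPTY_LIST;
 *	struct bio *bio;
 *
 *	bio_list_add(&deferred, some_bio);
 *	...
 *	while ((bio = bio_list_pop(&deferred)))
 *		submit_bio(bio);
 */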

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
        bio_set_flag(bio, BIO_CHAIN);
        smp_mb__before_atomic();
        atomic_inc(&bio->__bi_remaining);
}

/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
        struct kmem_cache *bio_slab;
        unsigned int front_pad;

        /*
         * per-cpu bio alloc cache
         */
        struct bio_alloc_cache __percpu *cache;

        mempool_t bio_pool;
        mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
        mempool_t bio_integrity_pool;
        mempool_t bvec_integrity_pool;
#endif

        unsigned int back_pad;
        /*
         * Deadlock avoidance for stacking block drivers: see comments in
         * bio_alloc_bioset() for details
         */
        spinlock_t		rescue_lock;
        struct bio_list		rescue_list;
        struct work_struct	rescue_work;
        struct workqueue_struct	*rescue_workqueue;

        /*
         * Hot un-plug notifier for the per-cpu cache, if used
         */
        struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
        return bs->bio_slab != NULL;
}
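
/*
 * Example usage (illustrative sketch; 'my_bioset' is a hypothetical name):
 * a stacking driver keeps a private bio_set so its allocations cannot be
 * starved by users of the shared fs_bio_set.
 *
 *	static struct bio_set my_bioset;
 *
 *	ret = bioset_init(&my_bioset, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 *	...
 *	bio = bio_alloc_bioset(bdev, nr_vecs, REQ_OP_WRITE, GFP_NOIO,
 *			       &my_bioset);
 *	...
 *	bioset_exit(&my_bioset);
 */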

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
        for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
        for_each_bio(_bio)						\
                bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
        return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
        return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
}

static inline bool bio_integrity_prep(struct bio *bio)
{
        return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
                                      gfp_t gfp_mask)
{
        return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
{
}

static inline void bio_integrity_trim(struct bio *bio)
{
}

static inline void bio_integrity_init(void)
{
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
        return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
                                        unsigned int nr)
{
        return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
                                         unsigned int len, unsigned int offset)
{
        return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ-driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
        bio->bi_opf |= REQ_POLLED;
        if (!is_sync_kiocb(kiocb))
                bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
        bio->bi_opf &= ~REQ_POLLED;
}

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
                unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);

#endif /* __LINUX_BIO_H */