// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records the actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}
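
/*
 * Example: for a bio over two 4096-byte bvecs with bi_iter.bi_size =
 * 6144, advancing a copy of the iterator by bi_size leaves bi_idx = 1
 * and bi_bvec_done = 2048, so the last bvec returned above is the
 * second vector truncated to 2048 bytes.
 */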

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with a non-zero offset, otherwise
	 * it is quite difficult to respect the sg gap limit. It is worth
	 * working hard here because workloads such as mkfs submit huge
	 * numbers of small single-bvec bios that we want to merge.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in an unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}
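
/*
 * Background on the virt_boundary checks above: drivers whose DMA
 * descriptors require every middle segment to start and end on a fixed
 * alignment (NVMe's PRP lists, for example, use a PAGE_SIZE - 1 mask)
 * set queue_virt_boundary(), and these gap checks reject merges that
 * would produce a segment layout such hardware cannot express.
 */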

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
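
/*
 * Worked example for the alignment trim above: with a granularity of 8
 * sectors, no discard_alignment and a bio starting at sector 5,
 * split_sectors = 1024 gives tmp = (5 + 1024) % 8 = 5, so the split is
 * shortened to 1019 sectors and the remainder resumes at sector 1024,
 * which is aligned to the granularity.
 */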

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector, 0);
	unsigned max_sectors = sectors;
	unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
	unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
	unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

	max_sectors += start_offset;
	max_sectors &= ~(pbs - 1);
	if (max_sectors > start_offset)
		return max_sectors - start_offset;

	return sectors & ~(lbs - 1);
}
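
/*
 * Example: with a 4096-byte physical block (pbs = 8 sectors), a bio
 * starting at sector 3 and a 255-sector limit, max_sectors becomes
 * (255 + 3) & ~7 = 256, so 256 - 3 = 253 sectors are allowed and the
 * split ends exactly on the physical block boundary at sector 256.
 */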

static inline unsigned get_max_segment_size(const struct request_queue *q,
					    struct page *start_page,
					    unsigned long offset)
{
	unsigned long mask = queue_segment_boundary(q);

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * mask - offset + 1 can overflow to zero when offset is 0 and the
	 * boundary mask is ~0UL (e.g. for a zero page physical address on a
	 * 32-bit arch); min_not_zero() then falls back to the queue's max
	 * segment size.
	 */
	return min_not_zero(mask - offset + 1,
			(unsigned long)queue_max_segment_size(q));
}
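
/*
 * Example: with queue_segment_boundary() = 0xffff (segments must not
 * cross a 64KB boundary), a fragment whose physical address is
 * 0x12340000 + 0xf000 may extend at most 0xffff - 0xf000 + 1 = 4096
 * bytes before hitting the boundary, further capped by
 * queue_max_segment_size().
 */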

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
			    const struct bio_vec *bv, unsigned *nsegs,
			    unsigned *sectors, unsigned max_segs,
			    unsigned max_sectors)
{
	unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	*sectors += total_len >> 9;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}

/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:   [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < max_segs &&
		    sectors + (bv.bv_len >> 9) <= max_sectors &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
					   max_sectors)) {
			goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;

	/*
	 * Bio splitting may cause subtle trouble such as hangs when doing
	 * sync iopoll in the direct IO path. Given that the performance gain
	 * of iopoll for big IO can be trivial, disable iopoll when a split
	 * is needed.
	 */
	bio_clear_polled(bio);
	return bio_split(bio, sectors, GFP_NOIO, bs);
}
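
/*
 * Note the fast path in the loop above: a bvec that stays within one
 * page and within the remaining segment and sector budgets is known to
 * fit in a single segment, so only bvecs that might straddle a segment
 * or virt boundary pay for the full bvec_split_segs() walk.
 */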

/**
 * __blk_queue_split - split a bio and submit the second half
 * @q:       [in] request_queue new bio is being queued at
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from q->bio_split, it is the responsibility
 * of the caller to ensure that q->bio_split is only released after processing
 * of the split bio has finished.
 */
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		       unsigned int *nr_segs)
{
	struct bio *split = NULL;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(split, (*bio)->bi_iter.bi_sector);
		submit_bio_noacct(*bio);
		*bio = split;

		blk_throtl_charge_bio_split(*bio);
	}
}

/**
 * blk_queue_split - split a bio and submit the second half
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from q->bio_split, it is the responsibility of the caller to ensure
 * that q->bio_split is only released after processing of the split bio has
 * finished.
 */
void blk_queue_split(struct bio **bio)
{
	struct request_queue *q = bdev_get_queue((*bio)->bi_bdev);
	unsigned int nr_segs;

	if (blk_may_split(q, *bio))
		__blk_queue_split(q, bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int nr_sectors = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}
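
/*
 * The special cases above reflect each op's payload: WRITE_ZEROES
 * carries no data at all (zero segments), while WRITE_SAME carries a
 * single logical block that the device repeats (one segment).
 */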

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
					offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}
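
/*
 * Example: an 8192-byte bvec on a queue whose max segment size is 4096
 * is emitted above as two scatterlist entries of 4096 bytes each; the
 * offset adjustment additionally keeps every entry's offset below
 * PAGE_SIZE for the drivers mentioned in the comment.
 */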

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios given we
			 * have done bio internal merge when adding pages
			 * to bio
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the request's number of physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
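
/*
 * A minimal caller sketch: a driver typically sizes its table with
 * blk_rq_nr_phys_segments() and uses the blk_rq_map_sg() wrapper,
 * which discards the *last_sg cookie:
 *
 *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 */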

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset, 0),
			blk_queue_get_max_sectors(q, req_op(rq)));
}
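
/*
 * chunk_sectors above describes a boundary requests must not cross,
 * such as the zone size of a zoned device or a RAID stripe chunk; when
 * it is set, blk_max_size_offset() further limits the request to the
 * room left before the next chunk boundary at @offset.
 */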

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_page(a) == bio_page(b) && bio_offset(a) == bio_offset(b))
		return true;
	return false;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to rq and release next.  merge_requests_fn will have updated the
	 * segment counts; update the sector counts here.  Handle DISCARDs
	 * separately, as they have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(next);

	/*
	 * Ownership of the bios has passed from next to req; return 'next'
	 * for the caller to free.
	 */
	next->bio = NULL;
	return next;
}
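
/*
 * A note on the mixed-merge handling above: merging a REQ_FAILFAST_*
 * request with a plain one must not silently widen or narrow either
 * side's failfast behaviour, so both requests are marked mixed and the
 * flags are pushed down into every bio, where completion handling can
 * honour them per bio.
 */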

static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			   struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* only merge an integrity protected bio into a likewise protected rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
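
/*
 * Position check example: for a request covering sectors [100, 108), a
 * back-merge candidate bio must start at sector 108 (blk_rq_pos +
 * blk_rq_sectors), while an 8-sector front-merge candidate must start
 * at 100 - 8 = 92 so that it ends exactly where the request begins.
 */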

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}
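
/*
 * @sched_allow_merge distinguishes the two callers above: plug-list
 * merging passes false (no elevator is involved, so no veto is asked
 * for), while elevator-driven merging passes true so the scheduler may
 * reject the merge via blk_mq_sched_allow_merge().
 */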

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 *
 * Determine whether @bio being queued on @q can be merged with the previous
 * request on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside the queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct blk_plug *plug;
	struct request *rq;

	plug = blk_mq_plug(q, bio);
	if (!plug || rq_list_empty(plug->mq_list))
		return false;

	/* check the previously added entry for a quick merge attempt */
	rq = rq_list_peek(&plug->mq_list);
	if (rq->q == q) {
		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
		    BIO_MERGE_OK)
			return true;
	}
	return false;
}
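
/*
 * Only the most recently added plug-list request is examined above: the
 * plug list is per-task and filled in issue order, so sequential IO
 * from one submitter almost always merges with the tail entry, and
 * scanning deeper into the list rarely pays off.
 */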

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
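
/*
 * After a successful bio merge above, the grown request may now abut a
 * neighbour in the scheduler, so attempt_back/front_merge() is tried as
 * well; when that request-request merge succeeds, the emptied request
 * is handed back through @merged_request for the caller to free.
 */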