// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the first bio starts at a non-zero offset;
	 * otherwise it is quite difficult to respect the sg gap limit.
	 * We work hard to merge a huge number of small single-bvec bios,
	 * as in the mkfs case.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the merged segment ending on an
	 * unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must consist of a
	 *   single bvec 'nb', otherwise 'nb' can't merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}
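/*
 * Worked example for bio_will_gap() (hypothetical numbers, assuming a
 * 4 KiB virt boundary, i.e. queue_virt_boundary(q) == 0xfff): if the last
 * bvec of 'prev' ends at in-page offset 0x800 and the first bvec of 'next'
 * starts at bv_offset 0x200, and the two are not physically contiguous,
 * the pair cannot be covered by one gap-free DMA transfer, so
 * bio_will_gap() returns true and the merge is refused.
 */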
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
				  bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
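/*
 * Example of the alignment math above (hypothetical numbers): with a
 * granularity of 8 sectors, a discard_alignment of 0, a bio starting at
 * sector 10 and max_discard_sectors of 128, tmp = (10 + 128) % 8 = 2, so
 * split_sectors becomes 126 and the first fragment ends at sector 136,
 * which leaves the remainder of the discard granularity-aligned.
 */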
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}
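/*
 * E.g. (hypothetical limits): with 4 KiB logical blocks, mask >> 9 == 7,
 * so a raw limit of 1023 sectors rounds down to 1016 sectors, keeping
 * every split a whole multiple of the logical block size.
 */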
static unsigned get_max_segment_size(struct request_queue *q,
				     unsigned offset)
{
	unsigned long mask = queue_segment_boundary(q);

	/* default segment boundary mask means no boundary limit */
	if (mask == BLK_SEG_BOUNDARY_MASK)
		return queue_max_segment_size(q);

	return min_t(unsigned long, mask - (mask & offset) + 1,
		     queue_max_segment_size(q));
}
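/*
 * E.g. (hypothetical limits): with a 64 KiB segment boundary
 * (mask == 0xffff) and an offset of 0xf000 into the boundary window, only
 * 0xffff - 0xf000 + 1 == 4 KiB remain before the boundary, so the segment
 * size is capped at 4 KiB even if queue_max_segment_size(q) is larger.
 */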
/*
 * Split the bvec @bv into segments, and update all kinds of
 * variables.
 */
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
		unsigned *nsegs, unsigned *sectors, unsigned max_segs)
{
	unsigned len = bv->bv_len;
	unsigned total_len = 0;
	unsigned new_nsegs = 0, seg_size = 0;

	/*
	 * A multi-page bvec may be too big to fit in one segment, so the
	 * current bvec may have to be split into multiple segments.
	 */
	while (len && new_nsegs + *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		new_nsegs++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	if (new_nsegs) {
		*nsegs += new_nsegs;
		if (sectors)
			*sectors += total_len >> 9;
	}

	/* split in the middle of the bvec if len != 0 */
	return !!len;
}
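/*
 * E.g. (hypothetical limits): a 64 KiB multi-page bvec on a queue with a
 * 32 KiB max segment size splits into two 32 KiB segments;
 * bvec_split_segs() then adds 2 to *nsegs and 128 to *sectors, and
 * returns false because the whole bvec was consumed.
 */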
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < max_segs &&
			    sectors < max_sectors) {
				/* split in the middle of bvec */
				bv.bv_len = (max_sectors - sectors) << 9;
				bvec_split_segs(q, &bv, &nsegs,
						&sectors, max_segs);
			}
			goto split;
		}

		if (nsegs == max_segs)
			goto split;

		bvprv = bv;
		bvprvp = &bvprv;

		if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
					   max_segs)) {
			goto split;
		}
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
}
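/*
 * In short, blk_bio_segment_split() splits a bio as soon as it would
 * violate one of three queue limits: an SG gap against the previous bvec,
 * the per-I/O size cap from get_max_io_size(), or queue_max_segments();
 * the first fragment keeps 'sectors' worth of data and the remainder is
 * resubmitted by the caller below.
 */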
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs)
{
	struct bio *split;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}

void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(q, bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);
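/*
 * Hypothetical caller sketch (illustration only, not part of this file):
 * a bio-based driver calls blk_queue_split() at the top of its
 * make_request function so that every bio it goes on to process already
 * respects the queue limits.
 */
#if 0	/* illustration only */
static blk_qc_t mydrv_make_request(struct request_queue *q, struct bio *bio)
{
	blk_queue_split(q, &bio);
	/* ... handle 'bio', which now fits within the queue limits ... */
	return BLK_QC_T_NONE;
}
#endif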
unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, NULL, UINT_MAX);
	return nr_phys_segs;
}
static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping.  We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}
static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine.  For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}
/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}
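/*
 * For example (hypothetical addresses): if the previous bvec ends at
 * physical address 0x10000000 and the current one starts exactly there,
 * and the combined length still fits queue_max_segment_size(q), the two
 * bvecs are folded into a single scatterlist entry instead of consuming
 * a new one.
 */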
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs that come from two
			 * different bios; merging within a single bio was
			 * already done when pages were added to it.
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}
/*
 * Map a request to a scatterlist and return the number of sg entries
 * set up.  The caller must make sure the scatterlist can hold
 * rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the number of the request's physical
	 * segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
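/*
 * Hypothetical caller sketch (illustration only, not part of this file):
 * a request-based driver typically sizes its scatterlist by the request's
 * physical segment count and maps the request just before programming the
 * DMA engine.
 */
#if 0	/* illustration only */
static int mydrv_map_rq(struct request_queue *q, struct request *rq,
			struct scatterlist *sglist)
{
	int nsegs;

	sg_init_table(sglist, blk_rq_nr_phys_segments(rq));
	nsegs = blk_rq_map_sg(q, rq, sglist);
	/* hand the first 'nsegs' entries of 'sglist' to the hardware */
	return nsegs;
}
#endif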
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment count.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}
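/*
 * E.g. (hypothetical limits): with queue_max_discard_segments(q) == 16,
 * each bio in a discard request describes one range, so merging a
 * 3-range request with a 2-range one simply yields a 5-segment request;
 * the ranges themselves do not have to be contiguous.
 */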
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}
/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio as a range
 * and sends them to the controller together.  The ranges need not be
 * contiguous.
 * Otherwise, the bios/requests are handled the same as others, which
 * must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}
/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next.  merge_requests_fn
	 * will have updated segment counts; update sector
	 * counts here.  Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	/*
	 * Ownership of the bios passed from next to req; return 'next' for
	 * the caller to free.
	 */
	next->bio = NULL;
	return next;
}
struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
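/*
 * Position arithmetic example for blk_try_merge() (hypothetical sectors):
 * a request covering sectors [0, 8) back-merges with a bio starting at
 * sector 8, while a request starting at sector 8 front-merges with an
 * 8-sector bio that starts at sector 0.
 */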