// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

/*
 * Check if the two bvecs from two bios can be merged to one segment. If yes,
 * no need to check gap between the two bios since the 1st bio and the 1st bvec
 * in the 2nd bio can be handled in one segment.
 */
static inline bool bios_segs_mergeable(struct request_queue *q,
		struct bio *prev, struct bio_vec *prev_last_bv,
		struct bio_vec *next_first_bv)
{
	if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
		return false;
	if (prev->bi_seg_back_size + next_first_bv->bv_len >
			queue_max_segment_size(q))
		return false;
	return true;
}

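/*
 * Check whether appending @next after @prev would create a segment gap
 * that violates the queue's virt boundary limit. @prev_rq is the request
 * that @prev belongs to for a back merge, or NULL for a front merge.
 */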
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends at an unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (bios_segs_mergeable(q, prev, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

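/*
 * Split a discard bio so that it honours the queue's discard granularity,
 * discard alignment and max_discard_sectors limits. Returns the split-off
 * front part of @bio, or NULL if no split is needed.
 */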
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

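/*
 * Return the maximum number of sectors an I/O starting at
 * bio->bi_iter.bi_sector may cover, rounded down to the logical block size.
 */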
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

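/*
 * Return the longest segment that can start at @offset without crossing
 * the queue's segment boundary mask or exceeding its max segment size.
 */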
static unsigned get_max_segment_size(struct request_queue *q,
				     unsigned offset)
{
	unsigned long mask = queue_segment_boundary(q);

	/* default segment boundary mask means no boundary limit */
	if (mask == BLK_SEG_BOUNDARY_MASK)
		return queue_max_segment_size(q);

	return min_t(unsigned long, mask - (mask & offset) + 1,
		     queue_max_segment_size(q));
}

/*
 * Split the bvec @bv into segments, and update the segment counters and
 * sizes accordingly.
 */
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
		unsigned *nsegs, unsigned *last_seg_size,
		unsigned *front_seg_size, unsigned *sectors, unsigned max_segs)
{
	unsigned len = bv->bv_len;
	unsigned total_len = 0;
	unsigned new_nsegs = 0, seg_size = 0;

	/*
	 * A multi-page bvec may be too big to hold in one segment, so the
	 * current bvec has to be split into multiple segments.
	 */
	while (len && new_nsegs + *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		new_nsegs++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	if (!new_nsegs)
		return !!len;

	/* update front segment size */
	if (!*nsegs) {
		unsigned first_seg_size;

		if (new_nsegs == 1)
			first_seg_size = get_max_segment_size(q, bv->bv_offset);
		else
			first_seg_size = queue_max_segment_size(q);

		if (*front_seg_size < first_seg_size)
			*front_seg_size = first_seg_size;
	}

	/* update other variables */
	*last_seg_size = seg_size;
	*nsegs += new_nsegs;
	if (sectors)
		*sectors += total_len >> 9;

	/* split in the middle of the bvec if len != 0 */
	return !!len;
}

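/*
 * Walk the bvecs of @bio and split it once adding another bvec would
 * exceed the queue's max_sectors, segment count or segment size limits,
 * or would create a gap that violates the virt boundary. Returns the
 * split-off front part, or NULL if @bio fits as a whole.
 */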
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < max_segs &&
			    sectors < max_sectors) {
				/* split in the middle of bvec */
				bv.bv_len = (max_sectors - sectors) << 9;
				bvec_split_segs(q, &bv, &nsegs,
						&seg_size,
						&front_seg_size,
						&sectors, max_segs);
			}
			goto split;
		}

		if (nsegs == max_segs)
			goto split;

		bvprv = bv;
		bvprvp = &bvprv;

		if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			seg_size = bv.bv_len;
			sectors += bv.bv_len >> 9;
			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
		} else if (bvec_split_segs(q, &bv, &nsegs, &seg_size,
				&front_seg_size, &sectors, max_segs)) {
			goto split;
		}
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

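/**
 * blk_queue_split - split a bio against the queue limits and resubmit
 *	the remainder
 * @q:   request queue the bio is (or will be) queued on
 * @bio: bio to check; on a split, updated to point at the front part
 *	 while the remainder is resubmitted via generic_make_request()
 */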
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever on waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

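/*
 * Count the physical segments in the bio list starting at @bio, applying
 * the queue's segment size limit and updating the front/back segment
 * sizes used for merge decisions.
 */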
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	struct bio_vec bv, bvprv = { NULL };
	int prev = 0;
	unsigned int seg_size, nr_phys_segs;
	unsigned front_seg_size;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	front_seg_size = bio->bi_seg_front_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_bvec(bv, bio, iter) {
			if (prev) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!biovec_phys_mergeable(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;

				if (nr_phys_segs == 1 && seg_size >
						front_seg_size)
					front_seg_size = seg_size;

				continue;
			}
new_segment:
			bvprv = bv;
			prev = 1;
			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
					&front_seg_size, NULL, UINT_MAX);
		}
		bbio = bio;
	}

	fbio->bi_seg_front_size = front_seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;

	bio_set_flag(bio, BIO_SEG_VALID);
}

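/*
 * Return 1 if the last segment of @bio and the first segment of @nxt are
 * physically contiguous and small enough to be merged into one segment,
 * 0 otherwise.
 */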
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

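/*
 * Map a single (possibly multi-page) bvec to one or more scatterlist
 * entries, starting a new entry whenever the segment size limit would be
 * exceeded. Returns the number of entries used.
 */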
static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0, offset = 0;

	while (nbytes > 0) {
		unsigned seg_size;
		struct page *pg;
		unsigned idx;

		*sg = blk_next_sg(sg, sglist);

		seg_size = get_max_segment_size(q, bvec->bv_offset + total);
		seg_size = min(nbytes, seg_size);

		offset = (total + bvec->bv_offset) % PAGE_SIZE;
		idx = (total + bvec->bv_offset) / PAGE_SIZE;
		pg = bvec_nth_page(bvec->bv_page, idx);

		sg_set_page(*sg, pg, seg_size, offset);

		total += seg_size;
		nbytes -= seg_size;
		nsegs++;
	}

	return nsegs;
}

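/*
 * Add @bvec to the scatterlist, extending the previous entry when the two
 * bvecs are physically contiguous and the segment size limit allows it.
 */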
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs)
{
	int nbytes = bvec->bv_len;

	if (*sg) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;
		if (!biovec_phys_mergeable(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
			*sg = blk_next_sg(sg, sglist);
			sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
			(*nsegs) += 1;
		} else
			(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
	}
	*bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;

	for_each_bio(bio)
		bio_for_each_bvec(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the number of segments we figured
	 * out is bigger than the request's count of physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged. Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}
/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio
 * as a range and sends them to the controller together. The ranges
 * need not be contiguous.
 * Otherwise, the bios/requests will be handled the same as others,
 * which must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}

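/*
 * Check whether @bio is mergeable into @rq at all: same operation, same
 * data direction and device, and compatible integrity profile, write hint
 * and priority.
 */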
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

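/*
 * Decide which merge type applies for @bio against @rq: a discard merge
 * when discard ranges may be accumulated, otherwise a back or front merge
 * based on sector position.
 */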
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}