// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

/*
 * Check if the two bvecs from two bios can be merged to one segment. If yes,
 * no need to check gap between the two bios since the 1st bio and the 1st bvec
 * in the 2nd bio can be handled in one segment.
 */
static inline bool bios_segs_mergeable(struct request_queue *q,
                struct bio *prev, struct bio_vec *prev_last_bv,
                struct bio_vec *next_first_bv)
{
        if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
                return false;
        if (prev->bi_seg_back_size + next_first_bv->bv_len >
                        queue_max_segment_size(q))
                return false;
        return true;
}

static inline bool bio_will_gap(struct request_queue *q,
                struct request *prev_rq, struct bio *prev, struct bio *next)
{
        struct bio_vec pb, nb;

        if (!bio_has_data(prev) || !queue_virt_boundary(q))
                return false;

        /*
         * Don't merge if the 1st bio starts with non-zero offset, otherwise it
         * is quite difficult to respect the sg gap limit. We work hard to
         * merge a huge number of small single bios in case of mkfs.
         */
        if (prev_rq)
                bio_get_first_bvec(prev_rq->bio, &pb);
        else
                bio_get_first_bvec(prev, &pb);
        if (pb.bv_offset & queue_virt_boundary(q))
                return true;

        /*
         * We don't need to worry about the situation that the merged segment
         * ends in an unaligned virt boundary:
         *
         * - if 'pb' ends aligned, the merged segment ends aligned
         * - if 'pb' ends unaligned, the next bio must include
         *   one single bvec of 'nb', otherwise the 'nb' can't
         *   merge with 'pb'
         */
        bio_get_last_bvec(prev, &pb);
        bio_get_first_bvec(next, &nb);
        if (bios_segs_mergeable(q, prev, &pb, &nb))
                return false;
        return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, NULL, bio, req->bio);
}
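
/*
 * Illustrative sketch (made-up values, not upstream code): for a queue whose
 * virt_boundary mask is PAGE_SIZE - 1, two bvecs may share one segment only
 * if the first ends on the boundary and the second starts on it. The
 * hypothetical names prev_off/prev_len/next_off stand in for the bvec
 * offset/length fields used by __bvec_gap_to_prev():
 *
 *      unsigned long mask = 4096 - 1;  // queue_virt_boundary(q)
 *      bool prev_ends_aligned = ((prev_off + prev_len) & mask) == 0;
 *      bool next_starts_aligned = (next_off & mask) == 0;
 *      bool gap = !prev_ends_aligned || !next_starts_aligned;
 */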

static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors,
                        bio_allowed_max_sectors(q));
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
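
/*
 * Worked example (made-up numbers): with granularity = 8 sectors,
 * discard_alignment = 0, bi_sector = 5 and max_discard_sectors = 1024,
 * tmp = (5 + 1024 - 0) % 8 = 5, so split_sectors becomes 1019 and the
 * split ends at sector 5 + 1019 = 1024, a multiple of the granularity;
 * the remainder of the discard therefore restarts aligned.
 */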

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
                struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_zeroes_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
        unsigned mask = queue_logical_block_size(q) - 1;

        /* aligned to logical block size */
        sectors &= ~(mask >> 9);

        return sectors;
}
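
/*
 * Worked example (made-up numbers): with a 4096-byte logical block size,
 * mask = 4095 and mask >> 9 = 7, so "sectors &= ~7" rounds the allowed
 * I/O size down to a multiple of eight 512-byte sectors, i.e. to a whole
 * number of logical blocks.
 */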

static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned seg_size = 0, nsegs = 0, sectors = 0;
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;
        const unsigned max_sectors = get_max_io_size(q, bio);

        bio_for_each_segment(bv, bio, iter) {
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (sectors + (bv.bv_len >> 9) > max_sectors) {
                        /*
                         * Consider this a new segment if we're splitting in
                         * the middle of this vector.
                         */
                        if (nsegs < queue_max_segments(q) &&
                            sectors < max_sectors) {
                                nsegs++;
                                sectors = max_sectors;
                        }
                        goto split;
                }

                if (bvprvp) {
                        if (seg_size + bv.bv_len > queue_max_segment_size(q))
                                goto new_segment;
                        if (!biovec_phys_mergeable(q, bvprvp, &bv))
                                goto new_segment;

                        seg_size += bv.bv_len;
                        bvprv = bv;
                        bvprvp = &bvprv;
                        sectors += bv.bv_len >> 9;

                        continue;
                }
new_segment:
                if (nsegs == queue_max_segments(q))
                        goto split;

                if (nsegs == 1 && seg_size > front_seg_size)
                        front_seg_size = seg_size;

                nsegs++;
                bvprv = bv;
                bvprvp = &bvprv;
                seg_size = bv.bv_len;
                sectors += bv.bv_len >> 9;
        }

        do_split = false;
split:
        *segs = nsegs;

        if (do_split) {
                new = bio_split(bio, sectors, GFP_NOIO, bs);
                if (new)
                        bio = new;
        }

        if (nsegs == 1 && seg_size > front_seg_size)
                front_seg_size = seg_size;
        bio->bi_seg_front_size = front_seg_size;
        if (seg_size > bio->bi_seg_back_size)
                bio->bi_seg_back_size = seg_size;

        return do_split ? new : NULL;
}

void blk_queue_split(struct request_queue *q, struct bio **bio)
{
        struct bio *split, *res;
        unsigned nsegs;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
                break;
        case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
                break;
        default:
                split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
                break;
        }

        /* physical segments can be figured out during splitting */
        res = split ? split : *bio;
        res->bi_phys_segments = nsegs;
        bio_set_flag(res, BIO_SEG_VALID);

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_opf |= REQ_NOMERGE;

                /*
                 * Since we're recursing into make_request here, ensure
                 * that we mark this bio as already having entered the queue.
                 * If not, and the queue is going away, we can get stuck
                 * forever on waiting for the queue reference to drop. But
                 * that will never happen, as we're already holding a
                 * reference to it.
                 */
                bio_set_flag(*bio, BIO_QUEUE_ENTERED);

                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                generic_make_request(*bio);
                *bio = split;
        }
}
EXPORT_SYMBOL(blk_queue_split);
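
/*
 * Usage sketch (hypothetical driver, illustrative only): a bio-based driver
 * is expected to split oversized bios against the queue limits before
 * handling them, typically at the top of its make_request function:
 *
 *      static blk_qc_t my_make_request(struct request_queue *q,
 *                                      struct bio *bio)
 *      {
 *              blk_queue_split(q, &bio);       // 'bio' may now point at the
 *                                              // first (split) fragment
 *              ...
 *              return BLK_QC_T_NONE;
 *      }
 */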

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio,
                                             bool no_sg_merge)
{
        struct bio_vec bv, bvprv = { NULL };
        int prev = 0;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;
        struct bvec_iter iter;

        if (!bio)
                return 0;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        }

        fbio = bio;
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, iter) {
                        /*
                         * If SG merging is disabled, each bio vector is
                         * a segment
                         */
                        if (no_sg_merge)
                                goto new_segment;

                        if (prev) {
                                if (seg_size + bv.bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!biovec_phys_mergeable(q, &bvprv, &bv))
                                        goto new_segment;

                                seg_size += bv.bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        prev = 1;
                        seg_size = bv.bv_len;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}
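
/*
 * Worked example (made-up layout): two 2048-byte bvecs that are physically
 * contiguous and together below queue_max_segment_size() are counted above
 * as a single 4096-byte physical segment; with QUEUE_FLAG_NO_SG_MERGE set,
 * the same bio would count as two segments.
 */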

void blk_recalc_rq_segments(struct request *rq)
{
        bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
                        &rq->q->queue_flags);

        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
                        no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        unsigned short seg_cnt;

        /* estimate segment number by bi_vcnt for non-cloned bio */
        if (bio_flagged(bio, BIO_CLONED))
                seg_cnt = bio_segments(bio);
        else
                seg_cnt = bio->bi_vcnt;

        if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
            (seg_cnt < queue_max_segments(q)))
                bio->bi_phys_segments = seg_cnt;
        else {
                struct bio *nxt = bio->bi_next;

                bio->bi_next = NULL;
                bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
                bio->bi_next = nxt;
        }

        bio_set_flag(bio, BIO_SEG_VALID);
}

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        struct bio_vec end_bv = { NULL }, nxt_bv;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        bio_get_last_bvec(bio, &end_bv);
        bio_get_first_bvec(nxt, &nxt_bv);

        return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
}

static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec *bvprv,
                     struct scatterlist **sg, int *nsegs)
{
        int nbytes = bvec->bv_len;

        if (*sg) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;
                if (!biovec_phys_mergeable(q, bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
                struct scatterlist *sglist, struct scatterlist **sg)
{
        *sg = sglist;
        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
        return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec bvec, bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs = 0;

        for_each_bio(bio)
                bio_for_each_segment(bvec, bio, iter)
                        __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
                                             &nsegs);

        return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
                nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (op_is_write(req_op(rq)))
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        /*
         * Something must have been wrong if the figured number of
         * segments is bigger than the number of req's physical segments
         */
        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
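
/*
 * Usage sketch (hypothetical driver, illustrative only): a request-based
 * driver typically maps a request into a preallocated table sized for
 * queue_max_segments() and then hands it to the DMA API:
 *
 *      int nents = blk_rq_map_sg(q, rq, sgl);
 *      nents = dma_map_sg(dev, sgl, nents,
 *                         rq_data_dir(rq) == WRITE ?
 *                              DMA_TO_DEVICE : DMA_FROM_DEVICE);
 */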

static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (blk_integrity_merge_bio(q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req_set_nomerge(q, req);
        return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(q, req);
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct request *next)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(next->bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
        return true;
no_merge:
        req_set_nomerge(q, req);
        return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->rq_flags & RQF_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
                bio->bi_opf |= ff;
        }
        rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;

                part_stat_lock();
                part = req->part;

                part_dec_in_flight(req->q, part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver takes every bio
 * as a range and sends them to the controller together. The ranges
 * need not be contiguous.
 * Otherwise, the bios/requests are handled the same as others,
 * which must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
        if (req_op(req) == REQ_OP_DISCARD &&
            queue_max_discard_segments(req->q) > 1)
                return true;
        return false;
}
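
/*
 * Usage sketch (hypothetical driver, illustrative only): a controller that
 * accepts several discard ranges per command advertises that via the queue
 * limits, which is what makes the multi-range case above take effect:
 *
 *      blk_queue_max_discard_segments(q, 256);
 *      blk_queue_max_discard_sectors(q, UINT_MAX);
 */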

static enum elv_merge blk_try_req_merge(struct request *req,
                                        struct request *next)
{
        if (blk_discard_mergable(req))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
                return ELEVATOR_BACK_MERGE;

        return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk)
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (req->write_hint != next->write_hint)
                return NULL;

        if (req->ioprio != next->ioprio)
                return NULL;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here. Handle DISCARDs separately, as they
         * have separate settings.
         */

        switch (blk_try_req_merge(req, next)) {
        case ELEVATOR_DISCARD_MERGE:
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
                break;
        case ELEVATOR_BACK_MERGE:
                if (!ll_merge_requests_fn(q, req, next))
                        return NULL;
                break;
        default:
                return NULL;
        }

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge or front merge. We
         * need the smaller start_time_ns of the merged requests to be the
         * current request for accounting purposes.
         */
        if (next->start_time_ns < req->start_time_ns)
                req->start_time_ns = next->start_time_ns;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        if (!blk_discard_mergable(req))
                elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        /*
         * ownership of bio passed from next to req, return 'next' for
         * the caller to free
         */
        next->bio = NULL;
        return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        struct request *free;

        free = attempt_merge(q, rq, next);
        if (free) {
                blk_put_request(free);
                return 1;
        }

        return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (req_op(rq) != bio_op(bio))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device */
        if (rq->rq_disk != bio->bi_disk)
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* must be using the same buffer */
        if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (rq->write_hint != bio->bi_write_hint)
                return false;

        if (rq->ioprio != bio_prio(bio))
                return false;

        return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_discard_mergable(rq))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}
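
/*
 * Worked example (made-up numbers): for a non-discard rq covering sectors
 * 8..15 (blk_rq_pos() == 8, blk_rq_sectors() == 8), a bio starting at
 * sector 16 satisfies the back merge test above, while an 8-sector bio
 * starting at sector 0 ends exactly at blk_rq_pos() and satisfies the
 * front merge test.
 */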