// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"

static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}

static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}
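
/*
 * Worked example (illustrative, not part of the kernel sources): for a bio
 * spanning two bvecs of 4096 and 8192 bytes with bi_iter.bi_size == 6144,
 * advancing a copy of the iterator by 6144 bytes stops in the middle of the
 * second bvec with iter.bi_bvec_done == 2048, so bio_get_last_bvec() returns
 * the second bvec with bv_len trimmed to 2048.
 */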

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit.  We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
}
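
/*
 * Illustrative example: with queue_virt_boundary(q) == 4095, a previous bio
 * whose last bvec ends 4K-aligned can still refuse a merge when the next
 * bio's first bvec starts at an in-page offset such as 512, since
 * __bvec_gap_to_prev() then reports a gap; only physically contiguous bvecs
 * (biovec_phys_mergeable()) are exempt from the boundary check.
 */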

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}

/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int', while it also has to be aligned to the
 * logical block size, which is the minimum unit accepted by the hardware.
 */
static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
{
	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}
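
/*
 * Worked example (illustrative): with a 4096-byte logical block size,
 * round_down(UINT_MAX, 4096) == 4294963200 bytes, so the cap is
 * 4294963200 >> 9 == 8388600 sectors of 512 bytes.
 */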

static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
{
	if (unlikely(split_sectors < 0)) {
		bio->bi_status = errno_to_blk_status(split_sectors);
		bio_endio(bio);
		return NULL;
	}

	if (split_sectors) {
		struct bio *split;

		split = bio_split(bio, split_sectors, GFP_NOIO,
				&bio->bi_bdev->bd_disk->bio_split);
		split->bi_opf |= REQ_NOMERGE;
		blkcg_bio_issue_init(split);
		bio_chain(split, bio);
		trace_block_split(split, bio->bi_iter.bi_sector);
		WARN_ON_ONCE(bio_zone_write_plugging(bio));
		submit_bio_noacct(bio);
		return split;
	}

	return bio;
}

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	granularity = max(lim->discard_granularity >> 9, 1U);

	max_discard_sectors =
		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors))
		return bio;

	if (bio_sectors(bio) <= max_discard_sectors)
		return bio;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	tmp = bio->bi_iter.bi_sector + split_sectors -
		((lim->discard_alignment >> 9) % granularity);
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_submit_split(bio, split_sectors);
}
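
/*
 * Worked example (illustrative): with an 8-sector discard granularity, zero
 * discard_alignment, a bio starting at sector 3 and max_discard_sectors ==
 * 64: tmp = (3 + 64) % 8 == 3, so the split is shortened to 61 sectors and
 * the remainder resumes at sector 64, which is granularity aligned.
 */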

struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs)
{
	*nsegs = 0;
	if (!lim->max_write_zeroes_sectors)
		return bio;
	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
		return bio;
	return bio_submit_split(bio, lim->max_write_zeroes_sectors);
}

static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
						bool is_atomic)
{
	/*
	 * chunk_sectors must be a multiple of atomic_write_boundary_sectors if
	 * both non-zero.
	 */
	if (is_atomic && lim->atomic_write_boundary_sectors)
		return lim->atomic_write_boundary_sectors;

	return lim->chunk_sectors;
}
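
/*
 * Illustrative example: a device with chunk_sectors == 1024 and
 * atomic_write_boundary_sectors == 256 (the multiple relationship noted
 * above) uses the 256-sector boundary for REQ_ATOMIC bios and the
 * 1024-sector chunk boundary for everything else.
 */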

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct bio *bio,
				       const struct queue_limits *lim)
{
	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
	bool is_atomic = bio->bi_opf & REQ_ATOMIC;
	unsigned boundary_sectors = blk_boundary_sectors(lim, is_atomic);
	unsigned max_sectors, start, end;

	/*
	 * We ignore lim->max_sectors for atomic writes because it may be less
	 * than the actual bio size, which we cannot tolerate.
	 */
	if (is_atomic)
		max_sectors = lim->atomic_write_max_sectors;
	else
		max_sectors = lim->max_sectors;

	if (boundary_sectors) {
		max_sectors = min(max_sectors,
			blk_boundary_sectors_left(bio->bi_iter.bi_sector,
						  boundary_sectors));
	}

	start = bio->bi_iter.bi_sector & (pbs - 1);
	end = (start + max_sectors) & ~(pbs - 1);
	if (end > start)
		return end - start;
	return max_sectors & ~(lbs - 1);
}
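
/*
 * Worked example (illustrative): with a 4096-byte physical block size
 * (pbs == 8 sectors), a bio starting at sector 5 and max_sectors == 64:
 * start == 5, end == (5 + 64) & ~7 == 64, so 59 sectors are allowed and the
 * request ends on a physical block boundary at sector 64.
 */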

/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @paddr: address of the range to add
 * @len: maximum length available to add at @paddr
 *
 * Returns the maximum number of bytes of the range starting at @paddr that can
 * be added to a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		phys_addr_t paddr, unsigned int len)
{
	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min_t(unsigned long, len,
		min(lim->seg_boundary_mask - (lim->seg_boundary_mask & paddr),
		    (unsigned long)lim->max_segment_size - 1) + 1);
}
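
/*
 * Worked example (illustrative): with seg_boundary_mask == 0xffff (64K) and
 * max_segment_size == 0x10000, a range at paddr == 0x1f000 may only extend
 * 0x1000 bytes before crossing the 64K boundary:
 * 0xffff - (0xffff & 0x1f000) == 0xfff, and min(0xfff, 0xffff) + 1 ==
 * 0x1000.  Subtracting one before the min() and adding it back afterwards
 * avoids overflow when the mask is ULONG_MAX.
 */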

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @lim:      [in] queue limits to split based on
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
 *            by the number of bytes from @bv that may be appended to that
 *            bio without exceeding @max_bytes
 * @max_segs: [in] upper bound for *@nsegs
 * @max_bytes: [in] upper bound for *@bytes
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct queue_limits *lim,
		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
		unsigned max_segs, unsigned max_bytes)
{
	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(lim, bvec_phys(bv) + total_len, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
			break;
	}

	*bytes += total_len;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}
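
/*
 * Worked example (illustrative): a 128K bvec on a queue with
 * max_segment_size == 64K and no virt boundary is accounted as two 64K
 * segments.  If only one more segment were allowed (*nsegs == max_segs - 1
 * on entry), the loop would stop with len > 0 and the function would return
 * true, telling the caller to split the bio inside this bvec.
 */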

/**
 * bio_split_rw_at - check if and where to split a read/write bio
 * @bio:  [in] bio to be split
 * @lim:  [in] queue limits to split based on
 * @segs: [out] number of segments in the bio with the first half of the sectors
 * @max_bytes: [in] maximum number of bytes per bio
 *
 * Find out if @bio needs to be split to fit the queue limits in @lim and a
 * maximum size of @max_bytes. Returns a negative error number if @bio can't be
 * split, 0 if the bio doesn't have to be split, or a positive sector offset if
 * @bio needs to be split.
 */
int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, unsigned max_bytes)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, bytes = 0;

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < lim->max_segments &&
		    bytes + bv.bv_len <= max_bytes &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			bytes += bv.bv_len;
		} else {
			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
					lim->max_segments, max_bytes))
				goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return 0;
split:
	if (bio->bi_opf & REQ_ATOMIC)
		return -EINVAL;

	/*
	 * We can't sanely support splitting for a REQ_NOWAIT bio. Return
	 * -EAGAIN so that the caller ends it with EAGAIN if splitting is
	 * required.
	 */
	if (bio->bi_opf & REQ_NOWAIT)
		return -EAGAIN;

	*segs = nsegs;

	/*
	 * Individual bvecs might not be logical block aligned. Round down the
	 * split size so that each bio is properly block size aligned, even if
	 * we do not use the full hardware limits.
	 */
	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);

	/*
	 * Bio splitting may cause subtle trouble such as hang when doing sync
	 * iopoll in direct IO routine. Given that the performance gain of
	 * iopoll for big IO can be trivial, disable iopoll when split needed.
	 */
	bio_clear_polled(bio);
	return bytes >> SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(bio_split_rw_at);

struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs)
{
	return bio_submit_split(bio,
		bio_split_rw_at(bio, lim, nr_segs,
			get_max_io_size(bio, lim) << SECTOR_SHIFT));
}

/*
 * REQ_OP_ZONE_APPEND bios must never be split by the block layer.
 *
 * But we want the nr_segs calculation provided by bio_split_rw_at, and having
 * a good sanity check that the submitter built the bio correctly is nice to
 * have as well.
 */
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs)
{
	unsigned int max_sectors = queue_limits_max_zone_append_sectors(lim);
	int split_sectors;

	split_sectors = bio_split_rw_at(bio, lim, nr_segs,
			max_sectors << SECTOR_SHIFT);
	if (WARN_ON_ONCE(split_sectors > 0))
		split_sectors = -EINVAL;
	return bio_submit_split(bio, split_sectors);
}

/**
 * bio_split_to_limits - split a bio to fit the queue limits
 * @bio:  bio to be split
 *
 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 * if so split off a bio fitting the limits from the beginning of @bio and
 * return it.  @bio is shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @bio->bi_bdev->bd_disk->bio_split, which is
 * provided by the block layer.
 */
struct bio *bio_split_to_limits(struct bio *bio)
{
	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
	unsigned int nr_segs;

	return __bio_split_to_limits(bio, lim, &nr_segs);
}
EXPORT_SYMBOL(bio_split_to_limits);
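
/*
 * Usage sketch (illustrative, hypothetical driver code): stacking drivers
 * that accept bios larger than their limits typically call this at the top
 * of their ->submit_bio() handler:
 *
 *	static void my_submit_bio(struct bio *bio)
 *	{
 *		bio = bio_split_to_limits(bio);
 *		if (!bio)
 *			return;
 *		... queue or remap the (possibly shortened) bio ...
 *	}
 */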

unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int bytes = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping.  We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = get_max_segment_size(&q->limits,
				bvec_phys(bvec) + total, nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine.  For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}
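
/*
 * Worked example (illustrative): a bvec with bv_offset == 8192 and
 * bv_len == 12288 over a compound page is normalized so the scatterlist
 * entry references the third 4K page with an in-page offset of 0; with
 * max_segment_size >= 12K and no segment boundary in the way this yields a
 * single 12K entry, while max_segment_size == 8K would yield an 8K and a 4K
 * entry.
 */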

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios given we
			 * have done bio internal merge when adding pages
			 * to bio
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the calculated number of segments
	 * is bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
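
/*
 * Usage sketch (illustrative, hypothetical driver code): a typical
 * ->queue_rq() prepares a scatterlist sized by the request's physical
 * segment count and maps the request through the blk_rq_map_sg() wrapper:
 *
 *	sg_init_table(cmd->sgl, blk_rq_nr_phys_segments(rq));
 *	nsegs = blk_rq_map_sg(rq->q, rq, cmd->sgl);
 *	... hand cmd->sgl[0..nsegs-1] to the DMA engine ...
 */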

static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;
	struct queue_limits *lim = &q->limits;
	unsigned int max_sectors, boundary_sectors;
	bool is_atomic = rq->cmd_flags & REQ_ATOMIC;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	boundary_sectors = blk_boundary_sectors(lim, is_atomic);
	max_sectors = blk_queue_get_max_sectors(rq);

	if (!boundary_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return max_sectors;
	return min(max_sectors,
		   blk_boundary_sectors_left(offset, boundary_sectors));
}
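
/*
 * Worked example (illustrative): with boundary_sectors == 256 and a request
 * positioned at sector 300, blk_boundary_sectors_left() returns 212 (the
 * distance to sector 512), so a merge may not grow the request past the
 * next chunk boundary even if max_sectors would allow it.
 */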

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
static void blk_rq_set_mixed_merge(struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static inline blk_opf_t bio_failfast(const struct bio *bio)
{
	if (bio->bi_opf & REQ_RAHEAD)
		return REQ_FAILFAST_MASK;

	return bio->bi_opf & REQ_FAILFAST_MASK;
}

/*
 * After we are marked as MIXED_MERGE, any new RA bio has to be updated
 * as failfast, and request's failfast has to be updated in case of
 * front merge.
 */
static inline void blk_update_mixed_merge(struct request *req,
		struct bio *bio, bool front_merge)
{
	if (req->rq_flags & RQF_MIXED_MERGE) {
		if (bio->bi_opf & REQ_RAHEAD)
			bio->bi_opf |= REQ_FAILFAST_MASK;

		if (front_merge) {
			req->cmd_flags &= ~REQ_FAILFAST_MASK;
			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
		}
	}
}

static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_local_dec(req->part,
				    in_flight[op_is_write(req_op(req))]);
		part_stat_unlock();
	}
}

static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

static bool blk_atomic_write_mergeable_rq_bio(struct request *rq,
					      struct bio *bio)
{
	return (rq->cmd_flags & REQ_ATOMIC) == (bio->bi_opf & REQ_ATOMIC);
}

static bool blk_atomic_write_mergeable_rqs(struct request *rq,
					   struct request *next)
{
	return (rq->cmd_flags & REQ_ATOMIC) == (next->cmd_flags & REQ_ATOMIC);
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next))
		return NULL;

	/* Don't merge requests with different write hints. */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	if (!blk_atomic_write_mergeable_rqs(req, next))
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	blk_crypto_rq_put_keyslot(next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

static struct request *attempt_back_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			   struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	/* Don't merge requests with different write hints. */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	if (blk_atomic_write_mergeable_rq_bio(rq, bio) == false)
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
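
/*
 * Worked example (illustrative): for a request covering sectors 100-163
 * (blk_rq_pos() == 100, blk_rq_sectors() == 64), a bio starting at sector
 * 164 is a back merge candidate, and a 32-sector bio starting at sector 68
 * is a front merge candidate (68 + 32 == 100).
 */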

static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, false);

	if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_bio_merged(bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	/*
	 * A front merge for writes to sequential zones of a zoned block device
	 * can happen only if the user submitted writes out of order. Do not
	 * merge such write to let it fail.
	 */
	if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		return BIO_MERGE_FAILED;

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, true);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}

static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}

static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}

/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 *
 * Determine whether @bio being queued on @q can be merged with the previous
 * request on %current's plugged list.  Returns %true if merge was successful,
 * otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock.  As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point.  In addition, we don't have
 * reliable access to the elevator outside the queue lock.  Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (!plug || rq_list_empty(plug->mq_list))
		return false;

	rq_list_for_each(&plug->mq_list, rq) {
		if (rq->q == q) {
			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
			    BIO_MERGE_OK)
				return true;
			break;
		}

		/*
		 * Only keep iterating plug list for merges if we have multiple
		 * queues
		 */
		if (!plug->multiple_queues)
			break;
	}
	return false;
}

/*
 * Iterate list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);