/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

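/*
 * Account for @nsect freshly completed sectors: advance the request's
 * hard sector/count bookkeeping and, where needed, resync the soft I/O
 * submission pointers with the hard state.
 */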
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
		rq->hard_sector += nsect;
		rq->hard_nr_sectors -= nsect;

		/*
		 * Move the I/O submission pointers ahead if required.
		 */
		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
		    (rq->sector <= rq->hard_sector)) {
			rq->sector = rq->hard_sector;
			rq->nr_sectors = rq->hard_nr_sectors;
			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
			rq->current_nr_sectors = rq->hard_cur_sectors;
			rq->buffer = bio_data(rq->bio);
		}

		/*
		 * if total number of sectors is less than the first segment
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk(KERN_ERR "blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
}

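/*
 * Walk every bio_vec of @rq and recompute how many physical and hardware
 * segments the request maps to, honouring the queue's clustering,
 * segment size and segment boundary limits.
 */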
void blk_recalc_rq_segments(struct request *rq)
{
	int nr_phys_segs;
	int nr_hw_segs;
	unsigned int phys_size;
	unsigned int hw_size;
	struct bio_vec *bv, *bvprv = NULL;
	int seg_size;
	int hw_seg_size;
	int cluster;
	struct req_iterator iter;
	int high, highprv = 1;
	struct request_queue *q = rq->q;

	if (!rq->bio)
		return;

	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	hw_seg_size = seg_size = 0;
	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
	rq_for_each_segment(bv, rq, iter) {
		/*
		 * the trick here is making sure that a high page is never
		 * considered part of another segment, since that might
		 * change with the bounce page.
		 */
		high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
		if (high || highprv)
			goto new_hw_segment;
		if (cluster) {
			if (seg_size + bv->bv_len > q->max_segment_size)
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
				goto new_segment;
			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
				goto new_hw_segment;

			seg_size += bv->bv_len;
			hw_seg_size += bv->bv_len;
			bvprv = bv;
			continue;
		}
new_segment:
		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
			hw_seg_size += bv->bv_len;
		else {
new_hw_segment:
			if (nr_hw_segs == 1 &&
			    hw_seg_size > rq->bio->bi_hw_front_size)
				rq->bio->bi_hw_front_size = hw_seg_size;
			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
			nr_hw_segs++;
		}

		nr_phys_segs++;
		bvprv = bv;
		seg_size = bv->bv_len;
		highprv = high;
	}

	if (nr_hw_segs == 1 &&
	    hw_seg_size > rq->bio->bi_hw_front_size)
		rq->bio->bi_hw_front_size = hw_seg_size;
	if (hw_seg_size > rq->biotail->bi_hw_back_size)
		rq->biotail->bi_hw_back_size = hw_seg_size;
	rq->nr_phys_segments = nr_phys_segs;
	rq->nr_hw_segments = nr_hw_segs;
}

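/*
 * Recount the segments of a single bio by wrapping it in a throwaway
 * on-stack request and reusing blk_recalc_rq_segments(), then mark the
 * bio's segment counts as valid.
 */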
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct request rq;
	struct bio *nxt = bio->bi_next;

	rq.q = q;
	rq.bio = rq.biotail = bio;
	bio->bi_next = NULL;
	blk_recalc_rq_segments(&rq);
	bio->bi_next = nxt;
	bio->bi_phys_segments = rq.nr_phys_segments;
	bio->bi_hw_segments = rq.nr_hw_segments;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

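/*
 * Check whether @bio and the following @nxt can be folded into a single
 * physical segment without violating the queue's clustering, segment
 * size and boundary constraints.
 */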
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
		return 0;

	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

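/*
 * As above, but for hardware segments: check whether the tail of @bio
 * and the head of @nxt may be merged into one hw segment.
 */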
static int blk_hw_contig_segment(struct request_queue *q, struct bio *bio,
				 struct bio *nxt)
{
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(nxt, BIO_SEG_VALID))
		blk_recount_segments(q, nxt);
	if (bio_has_data(bio) &&
	    (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
	     BIOVEC_VIRT_OVERSIZE(bio->bi_hw_back_size + nxt->bi_hw_front_size)))
		return 0;
	if (bio->bi_hw_back_size + nxt->bi_hw_front_size > q->max_segment_size)
		return 0;

	return 1;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > q->max_segment_size)
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
				goto new_segment;

			sg->length += nbytes;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/*
				 * If the driver previously mapped a shorter
				 * list, we could see a termination bit
				 * prematurely unless it fully inits the sg
				 * table on each mapping. We KNOW that there
				 * must be more entries here or the driver
				 * would be buggy, so force clear the
				 * termination bit to avoid doing a full
				 * sg_init_table() in drivers for each command.
				 */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (rq->data_len & q->dma_pad_mask)) {
		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_RW)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

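/*
 * Illustrative usage sketch, not part of this file: a driver would
 * typically size its scatterlist by rq->nr_phys_segments and map the
 * request before programming the DMA engine. my_prep_rq and MY_MAX_SEGS
 * below are hypothetical names.
 *
 *	static int my_prep_rq(struct request_queue *q, struct request *rq)
 *	{
 *		struct scatterlist sgl[MY_MAX_SEGS];
 *		int nents;
 *
 *		sg_init_table(sgl, MY_MAX_SEGS);
 *		nents = blk_rq_map_sg(q, rq, sgl);
 *		return nents;
 *	}
 *
 * The first nents entries of sgl[] are then ready to be handed to the
 * hardware.
 */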
static inline int ll_new_mergeable(struct request_queue *q,
				   struct request *req,
				   struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * A hw segment is just getting larger, bump just the phys
	 * counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_hw_segs = bio_hw_segments(q, bio);
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_hw_segments += nr_hw_segs;
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

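/*
 * Decide whether @bio may be appended to the back of @req without
 * exceeding the queue's sector and segment limits, updating the
 * request's segment counts on success.
 */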
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;
	int len;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
	if (!bio_has_data(bio) ||
	    (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
	     && !BIOVEC_VIRT_OVERSIZE(len))) {
		int mergeable = ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (req->nr_hw_segments == 1)
				req->bio->bi_hw_front_size = len;
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}

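/*
 * Mirror image of ll_back_merge_fn(): decide whether @bio may be
 * prepended to the front of @req.
 */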
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;
	int len;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);
	if (!bio_has_data(bio) ||
	    (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
	     !BIOVEC_VIRT_OVERSIZE(len))) {
		int mergeable = ll_new_mergeable(q, req, bio);

		if (mergeable) {
			if (bio->bi_hw_segments == 1)
				bio->bi_hw_front_size = len;
			if (req->nr_hw_segments == 1)
				req->biotail->bi_hw_back_size = len;
		}
		return mergeable;
	}

	return ll_new_hw_segment(q, req, bio);
}

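/*
 * Check whether @req and the following @next may be combined into one
 * request, folding their segment counts together if so.
 */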
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	int total_hw_segments;

	/*
	 * First check if either of the requests is a re-queued request.
	 * Can't merge them if so.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio))
		total_phys_segments--;

	if (total_phys_segments > q->max_phys_segments)
		return 0;

	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
		int len = req->biotail->bi_hw_back_size +
			  next->bio->bi_hw_front_size;
		/*
		 * propagate the combined length to the end of the requests
		 */
		if (req->nr_hw_segments == 1)
			req->bio->bi_hw_front_size = len;
		if (next->nr_hw_segments == 1)
			next->biotail->bi_hw_back_size = len;
		total_hw_segments--;
	}

	if (total_hw_segments > q->max_hw_segments)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	req->nr_hw_segments = total_hw_segments;
	return 1;
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (req->sector + req->nr_sectors != next->sector)
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (blk_integrity_rq(req) != blk_integrity_rq(next))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;

	elv_merge_requests(q, req, next);

	if (req->rq_disk) {
		struct hd_struct *part
			= get_part(req->rq_disk, req->sector);
		disk_round_stats(req->rq_disk);
		req->rq_disk->in_flight--;
		if (part) {
			part_round_stats(part);
			part->in_flight--;
		}
	}

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);

	__blk_put_request(q, next);
	return 1;
}

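/*
 * Try to merge @rq with the request that the elevator reports as
 * immediately following it.
 */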
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

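/*
 * Try to merge @rq with the request that the elevator reports as
 * immediately preceding it.
 */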
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}