// SPDX-License-Identifier: GPL-2.0
/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>
#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"
/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */
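/*
 * Worked example of the defaults above: HZ jiffies make up one second, so
 * a read must be dispatched within 500 ms of being queued and a write
 * within 5 s. These deadlines are soft: up to fifo_batch (16) requests in
 * sector order may be dispatched as one batch before expiry is rechecked,
 * trading a bounded amount of latency for throughput.
 */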
struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next in sort order. read, write or both are NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	spinlock_t lock;
	spinlock_t zone_lock;
	struct list_head dispatch;
};
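
/*
 * Note on indexing: the two-element arrays above are indexed by data
 * direction, READ (0) or WRITE (1), so each direction gets its own
 * sector-sorted rbtree, arrival-ordered FIFO, expiry limit and cached
 * next request.
 */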

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(dd, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}
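
/*
 * Illustration: suppose req was queued at t=100 jiffies and next at t=50,
 * so next carries the earlier fifo_time. When next is merged into req,
 * req inherits next's fifo_time and FIFO position; merging therefore
 * never pushes back the deadline of the I/O that arrived first.
 */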

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}
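
/*
 * Only the head of the FIFO needs checking: requests are appended in
 * arrival order with a fixed per-direction expire offset, so the oldest
 * request always has the earliest fifo_time. time_after_eq() performs the
 * comparison correctly across jiffies wraparound.
 */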

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	if (list_empty(&dd->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	rq = dd->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}
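
/*
 * The two helpers above differ only in ordering: deadline_fifo_request()
 * walks the arrival-ordered FIFO (used when a deadline has expired or the
 * batch direction changes), while deadline_next_request() follows the
 * cached sector-ordered successor (used to keep a sequential batch going).
 * On zoned devices, both skip writes whose target zone is write-locked.
 */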

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
	struct request *rq, *next_rq;
	bool reads, writes;
	int data_dir;

	if (!list_empty(&dd->dispatch)) {
		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	reads = !list_empty(&dd->fifo_list[READ]);
	writes = !list_empty(&dd->fifo_list[WRITE]);

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, WRITE);
	if (!rq)
		rq = deadline_next_request(dd, READ);

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (deadline_fifo_request(dd, WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, data_dir);
	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);
done:
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}
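
/*
 * Dispatch walk-through with the default tunables: a stream of sequential
 * reads is served from the sort tree until 16 (fifo_batch) requests have
 * been batched or a read deadline expires. dd->starved counts how many
 * times the read direction was chosen while a dispatchable write was
 * waiting; once that count reaches writes_starved (default 2), the next
 * selection is forced to writes, so reads cannot starve writes forever.
 */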

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	spin_lock(&dd->lock);
	rq = __dd_dispatch_request(dd);
	spin_unlock(&dd->lock);

	return rq;
}

static void dd_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);
	INIT_LIST_HEAD(&dd->dispatch);

	q->elevator = eq;
	return 0;
}

static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}
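
/*
 * Front-merge lookup in a nutshell: a bio ending at sector S can be
 * prepended to a request starting at sector S, so the rbtree is searched
 * for a request whose start position equals bio_end_sector(bio). A hit
 * that also passes elv_bio_merge_ok() becomes an ELEVATOR_FRONT_MERGE.
 */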

static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
			 unsigned int nr_segs)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	if (blk_mq_sched_try_insert_merge(q, rq))
		return;

	trace_block_rq_insert(rq);

	if (at_head) {
		list_add(&rq->queuelist, &dd->dispatch);
	} else {
		deadline_add_rq_rb(dd, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
	}
}

static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that the
 * .finish_request method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;

	if (blk_queue_is_zoned(q)) {
		struct deadline_data *dd = q->elevator->elevator_data;
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&dd->fifo_list[WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}
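
/*
 * dd_has_work() runs without dd->lock, which is why it uses
 * list_empty_careful(): the answer is an opportunistic hint about whether
 * the queue is worth running, not a synchronized guarantee, so a lockless
 * but tearing-safe emptiness test is sufficient here.
 */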

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	deadline_var_store(&__data, (page));				\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};
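
/*
 * These attributes appear under /sys/block/<dev>/queue/iosched/ once
 * mq-deadline is the active scheduler. For example (the device name is
 * illustrative):
 *
 *	# echo mq-deadline > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/iosched/read_expire
 *	500
 *	# echo 250 > /sys/block/sda/queue/iosched/read_expire
 *
 * The expire values are shown and stored in milliseconds; the __CONV flag
 * above converts them to and from jiffies.
 */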

#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)				\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&dd->fifo_list[ddir], *pos);		\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	return seq_list_next(v, &dd->fifo_list[ddir], pos);		\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct request *rq = dd->next_rq[ddir];				\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
	__releases(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
	.start	= deadline_dispatch_start,
	.next	= deadline_dispatch_next,
	.stop	= deadline_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)						\
	{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops},	\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read),
	DEADLINE_QUEUE_DDIR_ATTRS(write),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif
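
/*
 * With CONFIG_BLK_DEBUG_FS enabled, the attributes above are exposed
 * read-only under /sys/kernel/debug/block/<dev>/sched/, e.g.
 * read_fifo_list, write_next_rq, batching, starved and dispatch, making
 * the scheduler's internal state observable at runtime.
 */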

static struct elevator_type mq_deadline = {
	.ops = {
		.insert_requests = dd_insert_requests,
		.dispatch_request = dd_dispatch_request,
		.prepare_request = dd_prepare_request,
		.finish_request = dd_finish_request,
		.next_request = elv_rb_latter_request,
		.former_request = elv_rb_former_request,
		.bio_merge = dd_bio_merge,
		.request_merge = dd_request_merge,
		.requests_merged = dd_merged_requests,
		.request_merged = dd_request_merged,
		.has_work = dd_has_work,
		.init_sched = dd_init_queue,
		.exit_sched = dd_exit_queue,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");