/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * Requests are present on both sort_list[] and fifo_list[].
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * Next request in sort order; the read, the write, or both may be NULL.
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;

	spinlock_t lock;
	struct list_head dispatch;
};

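/*
 * Return the sector-sorted rbtree for @rq's data direction (READ or WRITE).
 */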
static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

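/*
 * Add @rq to the rbtree that keeps the requests in its data direction
 * sorted by sector.
 */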
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);

	elv_rb_add(root, rq);
}

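/*
 * Remove @rq from the rbtree. If @rq was cached as the next request in
 * sort order, advance the cache to its successor first.
 */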
static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(dd, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition the request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

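/*
 * Called when @next has been merged into @req and is about to go away.
 */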
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	/*
	 * if next expires before req, assign its expire time to req
	 * and move into next's position (next will be deleted) in the fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}

/*
 * deadline_move_request takes rq off the sort and fifo lists and caches
 * its successor in sort order as the next dispatch candidate.
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	if (list_empty(&dd->fifo_list[data_dir]))
		return NULL;

	return rq_entry_fifo(dd->fifo_list[data_dir].next);
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
	if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
		return NULL;

	return dd->next_rq[data_dir];
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq, *next_rq;
	bool reads, writes;
	int data_dir;

	if (!list_empty(&dd->dispatch)) {
		rq = list_first_entry(&dd->dispatch, struct request, queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	reads = !list_empty(&dd->fifo_list[READ]);
	writes = !list_empty(&dd->fifo_list[WRITE]);

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, WRITE);
	if (!rq)
		rq = deadline_next_request(dd, READ);

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, data_dir);
	if (deadline_check_fifo(dd, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);
done:
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

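/*
 * Serialising wrapper: one request is returned per call, with all
 * scheduler state protected by dd->lock.
 */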
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;

	spin_lock(&dd->lock);
	rq = __dd_dispatch_request(hctx);
	spin_unlock(&dd->lock);

	return rq;
}

static void dd_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = dd;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	INIT_LIST_HEAD(&dd->dispatch);

	q->elevator = eq;
	return 0;
}

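/*
 * Check whether @bio can be front merged: look in the sort tree for a
 * request that starts exactly where @bio ends.
 */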
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

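/*
 * Try to merge @bio into an already queued request before allocating a
 * new one. A request made redundant by the merge is freed after dropping
 * dd->lock.
 */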
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo; requests inserted at head and passthrough
 * requests skip the scheduler and go straight to the dispatch list
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	if (blk_mq_sched_try_insert_merge(q, rq))
		return;

	blk_mq_sched_request_inserted(rq);

	if (at_head || blk_rq_is_passthrough(rq)) {
		if (at_head)
			list_add(&rq->queuelist, &dd->dispatch);
		else
			list_add_tail(&rq->queuelist, &dd->dispatch);
	} else {
		deadline_add_rq_rb(dd, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
	}
}

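/*
 * Insert a list of requests, taking dd->lock only once for the whole batch.
 */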
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

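/*
 * Called without dd->lock held, hence the list_empty_careful() checks.
 */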
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;

	return !list_empty_careful(&dd->dispatch) ||
		!list_empty_careful(&dd->fifo_list[0]) ||
		!list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
}

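/*
 * Generate a sysfs show method per tunable; __CONV converts jiffies back
 * to milliseconds for the expiry attributes.
 */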
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

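/*
 * Generate the matching store method: parse the value, clamp it to
 * [MIN, MAX] and, if __CONV is set, convert milliseconds to jiffies.
 */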
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	deadline_var_store(&__data, (page));				\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return count;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
				      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};

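/*
 * debugfs attributes: expose the fifo lists, the cached next requests and
 * the batching/starved counters for inspection.
 */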
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)				\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&dd->fifo_list[ddir], *pos);		\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	return seq_list_next(v, &dd->fifo_list[ddir], pos);		\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct request *rq = dd->next_rq[ddir];				\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
	__releases(&dd->lock)
{
	struct request_queue *q = m->private;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
	.start	= deadline_dispatch_start,
	.next	= deadline_dispatch_next,
	.stop	= deadline_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)						\
	{#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops},	\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read),
	DEADLINE_QUEUE_DDIR_ATTRS(write),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

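/*
 * Hook the scheduler into the blk-mq elevator framework.
 */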
static struct elevator_type mq_deadline = {
	.ops.mq = {
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_queue,
		.exit_sched		= dd_exit_queue,
	},

	.uses_mq	= true,
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");