block/mq-deadline: Stop using per-CPU counters
// SPDX-License-Identifier: GPL-2.0
/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

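/*
 * Illustrative arithmetic for the defaults above: HZ jiffies equal one
 * second, so reads expire 0.5 s and writes 5 s after insertion, independent
 * of the configured HZ. Up to fifo_batch (16) requests may be dispatched in
 * a batch before the expiry checks are consulted again.
 */
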
enum dd_data_dir {
        DD_READ = READ,
        DD_WRITE = WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
        DD_RT_PRIO = 0,
        DD_BE_PRIO = 1,
        DD_IDLE_PRIO = 2,
        DD_PRIO_MAX = 2,
};

enum { DD_PRIO_COUNT = 3 };

/*
 * I/O statistics per I/O priority. It is fine if these counters overflow.
 * What matters is that these counters are at least as wide as
 * log2(max_outstanding_requests).
 */
struct io_stats_per_prio {
        uint32_t inserted;
        uint32_t merged;
        uint32_t dispatched;
        atomic_t completed;
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
        struct list_head dispatch;
        struct rb_root sort_list[DD_DIR_COUNT];
        struct list_head fifo_list[DD_DIR_COUNT];
        /* Next request in FIFO order. Read, write or both may be NULL. */
        struct request *next_rq[DD_DIR_COUNT];
        struct io_stats_per_prio stats;
};

struct deadline_data {
        /*
         * run time data
         */

        struct dd_per_prio per_prio[DD_PRIO_COUNT];

        /* Data direction of latest dispatched request. */
        enum dd_data_dir last_dir;
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[DD_DIR_COUNT];
        int fifo_batch;
        int writes_starved;
        int front_merges;
        u32 async_depth;

        spinlock_t lock;
        spinlock_t zone_lock;
};

/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
        [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
        [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
        [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
        [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
};

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
        return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
        return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(per_prio, rq);

        elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
        const enum dd_data_dir data_dir = rq_data_dir(rq);

        if (per_prio->next_rq[data_dir] == rq)
                per_prio->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
                                    struct dd_per_prio *per_prio,
                                    struct request *rq)
{
        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(per_prio, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(req);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(per_prio, req), req);
                deadline_add_rq_rb(per_prio, req);
        }
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(next);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

        lockdep_assert_held(&dd->lock);

        dd->per_prio[prio].stats.merged++;

        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      struct request *rq)
{
        const enum dd_data_dir data_dir = rq_data_dir(rq);

        per_prio->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
        const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

        lockdep_assert_held(&dd->lock);

        return stats->inserted - atomic_read(&stats->completed);
}

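/*
 * Worked example of the wrap-around arithmetic above (illustrative values):
 * inserted == 0x00000005 after wrapping and completed == 0xfffffffd still
 * yield 0x00000005 - 0xfffffffd == 8 queued requests, because the
 * subtraction is performed modulo 2^32.
 */
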
/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
                                      enum dd_data_dir data_dir)
{
        struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (list_empty(&per_prio->fifo_list[data_dir]))
                return NULL;

        rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
                if (blk_req_can_dispatch_to_zone(rq))
                        goto out;
        }
        rq = NULL;
out:
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
                      enum dd_data_dir data_dir)
{
        struct request *rq;
        unsigned long flags;

        rq = per_prio->next_rq[data_dir];
        if (!rq)
                return NULL;

        if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        while (rq) {
                if (blk_req_can_dispatch_to_zone(rq))
                        break;
                rq = deadline_latter_request(rq);
        }
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * __dd_dispatch_request selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
                                             struct dd_per_prio *per_prio)
{
        struct request *rq, *next_rq;
        enum dd_data_dir data_dir;
        enum dd_prio prio;
        u8 ioprio_class;

        lockdep_assert_held(&dd->lock);

        if (!list_empty(&per_prio->dispatch)) {
                rq = list_first_entry(&per_prio->dispatch, struct request,
                                      queuelist);
                list_del_init(&rq->queuelist);
                goto done;
        }

        /*
         * batches are currently reads XOR writes
         */
        rq = deadline_next_request(dd, per_prio, dd->last_dir);
        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (!list_empty(&per_prio->fifo_list[DD_READ])) {
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

                if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = DD_READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

                dd->starved = 0;

                data_dir = DD_WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        next_rq = deadline_next_request(dd, per_prio, data_dir);
        if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, per_prio, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        /*
         * For a zoned block device, if we only have writes queued and none of
         * them can be dispatched, rq will be NULL.
         */
        if (!rq)
                return NULL;

        dd->last_dir = data_dir;
        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, per_prio, rq);
done:
        ioprio_class = dd_rq_ioclass(rq);
        prio = ioprio_class_to_prio[ioprio_class];
        dd->per_prio[prio].stats.dispatched++;
        /*
         * If the request needs its target zone locked, do it.
         */
        blk_req_zone_write_lock(rq);
        rq->rq_flags |= RQF_STARTED;
        return rq;
}

/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 *
 * Priority levels are tried in ascending numerical order (RT, then BE,
 * then IDLE); a later level is only served once all earlier levels have
 * no request to dispatch.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;
        enum dd_prio prio;

        spin_lock(&dd->lock);
        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
                if (rq)
                        break;
        }
        spin_unlock(&dd->lock);

        return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
        struct deadline_data *dd = data->q->elevator->elevator_data;

        /* Do not throttle synchronous reads. */
        if (op_is_sync(op) && !op_is_write(op))
                return;

        /*
         * Throttle asynchronous requests and writes such that these requests
         * do not block the allocation of synchronous requests.
         */
        data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct blk_mq_tags *tags = hctx->sched_tags;

        dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

        sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}

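/*
 * Illustrative example for dd_depth_updated() above: with nr_requests == 64,
 * async_depth becomes 3 * 64 / 4 == 48, so at most 48 of the 64 scheduler
 * tags can be consumed by asynchronous requests and writes, leaving tags
 * available for synchronous reads.
 */
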
/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        dd_depth_updated(hctx);
        return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];
                const struct io_stats_per_prio *stats = &per_prio->stats;
                uint32_t queued;

                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
                WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));

                spin_lock(&dd->lock);
                queued = dd_queued(dd, prio);
                spin_unlock(&dd->lock);

                WARN_ONCE(queued != 0,
                          "statistics for priority %d: i %u m %u d %u c %u\n",
                          prio, stats->inserted, stats->merged,
                          stats->dispatched, atomic_read(&stats->completed));
        }

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;
        enum dd_prio prio;
        int ret = -ENOMEM;

        eq = elevator_alloc(q, e);
        if (!eq)
                return ret;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                goto put_eq;

        eq->elevator_data = dd;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
                struct dd_per_prio *per_prio = &dd->per_prio[prio];

                INIT_LIST_HEAD(&per_prio->dispatch);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
                INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
                per_prio->sort_list[DD_READ] = RB_ROOT;
                per_prio->sort_list[DD_WRITE] = RB_ROOT;
        }
        dd->fifo_expire[DD_READ] = read_expire;
        dd->fifo_expire[DD_WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->last_dir = DD_WRITE;
        dd->fifo_batch = fifo_batch;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);

        q->elevator = eq;
        return 0;

put_eq:
        kobject_put(&eq->kobj);
        return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        if (blk_discard_mergable(__rq))
                                return ELEVATOR_DISCARD_MERGE;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
                         unsigned int nr_segs)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const enum dd_data_dir data_dir = rq_data_dir(rq);
        u16 ioprio = req_get_ioprio(rq);
        u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
        struct dd_per_prio *per_prio;
        enum dd_prio prio;
        LIST_HEAD(free);

        lockdep_assert_held(&dd->lock);

        /*
         * This may be a requeue of a write request that has locked its
         * target zone. If it is the case, this releases the zone lock.
         */
        blk_req_zone_write_unlock(rq);

        prio = ioprio_class_to_prio[ioprio_class];
        per_prio = &dd->per_prio[prio];
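        /*
         * Count a request as inserted only once, even if it is requeued:
         * elv.priv[0] records whether this request has already been counted.
         */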
        if (!rq->elv.priv[0]) {
                per_prio->stats.inserted++;
                rq->elv.priv[0] = (void *)(uintptr_t)1;
        }

        if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
                blk_mq_free_requests(&free);
                return;
        }

        trace_block_rq_insert(rq);

        if (at_head) {
                list_add(&rq->queuelist, &per_prio->dispatch);
        } else {
                deadline_add_rq_rb(per_prio, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
        }
}

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
        rq->elv.priv[0] = NULL;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct deadline_data *dd = q->elevator->elevator_data;
        const u8 ioprio_class = dd_rq_ioclass(rq);
        const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
        struct dd_per_prio *per_prio = &dd->per_prio[prio];

        /*
         * The block layer core may call dd_finish_request() without having
         * called dd_insert_requests(). Skip requests that bypassed I/O
         * scheduling. See also blk_mq_request_bypass_insert().
         */
        if (!rq->elv.priv[0])
                return;

        atomic_inc(&per_prio->stats.completed);

        if (blk_queue_is_zoned(q)) {
                unsigned long flags;

                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
                        blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
        return !list_empty_careful(&per_prio->dispatch) ||
                !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
                !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        enum dd_prio prio;

        for (prio = 0; prio <= DD_PRIO_MAX; prio++)
                if (dd_has_work_for_prio(&dd->per_prio[prio]))
                        return true;

        return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)                                         \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
                                                                        \
        return sysfs_emit(page, "%d\n", __VAR);                         \
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data, __ret;                                              \
                                                                        \
        __ret = kstrtoint(page, 0, &__data);                            \
        if (__ret < 0)                                                  \
                return __ret;                                           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        *(__PTR) = __CONV(__data);                                      \
        return count;                                                   \
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)                              \
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                          \
        STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

#define DD_ATTR(name) \
        __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(async_depth),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

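/*
 * Illustrative usage: the attributes above are exposed under
 * /sys/block/<dev>/queue/iosched/, e.g.
 *
 *   echo 250 > /sys/block/sda/queue/iosched/read_expire    (milliseconds)
 *   cat /sys/block/sda/queue/iosched/fifo_batch
 */
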
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)               \
static void *deadline_##name##_fifo_start(struct seq_file *m,           \
                                          loff_t *pos)                  \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
}                                                                       \
                                                                        \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
                                         loff_t *pos)                   \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
}                                                                       \
                                                                        \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {   \
        .start  = deadline_##name##_fifo_start,                         \
        .next   = deadline_##name##_fifo_next,                          \
        .stop   = deadline_##name##_fifo_stop,                          \
        .show   = blk_mq_debugfs_rq_show,                               \
};                                                                      \
                                                                        \
static int deadline_##name##_next_rq_show(void *data,                   \
                                          struct seq_file *m)           \
{                                                                       \
        struct request_queue *q = data;                                 \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
        struct request *rq = per_prio->next_rq[data_dir];               \
                                                                        \
        if (rq)                                                         \
                __blk_mq_debugfs_rq_show(m, rq);                        \
        return 0;                                                       \
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);
        return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);
        return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->async_depth);
        return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;
        u32 rt, be, idle;

        spin_lock(&dd->lock);
        rt = dd_queued(dd, DD_RT_PRIO);
        be = dd_queued(dd, DD_BE_PRIO);
        idle = dd_queued(dd, DD_IDLE_PRIO);
        spin_unlock(&dd->lock);

        seq_printf(m, "%u %u %u\n", rt, be, idle);

        return 0;
}

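/*
 * Note on the accounting in dd_owned_by_driver() below: a request that is
 * merged into another request is freed without ever being dispatched, yet
 * dd_finish_request() still increments 'completed' for it. Adding 'merged'
 * keeps the difference balanced.
 */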
/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
        const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;

        lockdep_assert_held(&dd->lock);

        return stats->dispatched + stats->merged -
                atomic_read(&stats->completed);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;
        u32 rt, be, idle;

        spin_lock(&dd->lock);
        rt = dd_owned_by_driver(dd, DD_RT_PRIO);
        be = dd_owned_by_driver(dd, DD_BE_PRIO);
        idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
        spin_unlock(&dd->lock);

        seq_printf(m, "%u %u %u\n", rt, be, idle);

        return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)                                    \
static void *deadline_dispatch##prio##_start(struct seq_file *m,        \
                                             loff_t *pos)               \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&per_prio->dispatch, *pos);               \
}                                                                       \
                                                                        \
static void *deadline_dispatch##prio##_next(struct seq_file *m,         \
                                            void *v, loff_t *pos)       \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
                                                                        \
        return seq_list_next(v, &per_prio->dispatch, pos);              \
}                                                                       \
                                                                        \
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
        .start  = deadline_dispatch##prio##_start,                      \
        .next   = deadline_dispatch##prio##_next,                       \
        .stop   = deadline_dispatch##prio##_stop,                       \
        .show   = blk_mq_debugfs_rq_show,                               \
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                 \
        {#name "_fifo_list", 0400,                                      \
         .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)                                     \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read0),
        DEADLINE_QUEUE_DDIR_ATTRS(write0),
        DEADLINE_QUEUE_DDIR_ATTRS(read1),
        DEADLINE_QUEUE_DDIR_ATTRS(write1),
        DEADLINE_QUEUE_DDIR_ATTRS(read2),
        DEADLINE_QUEUE_DDIR_ATTRS(write2),
        DEADLINE_NEXT_RQ_ATTR(read0),
        DEADLINE_NEXT_RQ_ATTR(write0),
        DEADLINE_NEXT_RQ_ATTR(read1),
        DEADLINE_NEXT_RQ_ATTR(write1),
        DEADLINE_NEXT_RQ_ATTR(read2),
        DEADLINE_NEXT_RQ_ATTR(write2),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"async_depth", 0400, dd_async_depth_show},
        {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
        {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
        {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
        {"owned_by_driver", 0400, dd_owned_by_driver_show},
        {"queued", 0400, dd_queued_show},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

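/*
 * Illustrative usage: with debugfs mounted, the attributes above appear
 * under /sys/kernel/debug/block/<dev>/sched/, e.g.
 *
 *   cat /sys/kernel/debug/block/sda/sched/queued
 */
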
static struct elevator_type mq_deadline = {
        .ops = {
                .depth_updated = dd_depth_updated,
                .limit_depth = dd_limit_depth,
                .insert_requests = dd_insert_requests,
                .dispatch_request = dd_dispatch_request,
                .prepare_request = dd_prepare_request,
                .finish_request = dd_finish_request,
                .next_request = elv_rb_latter_request,
                .former_request = elv_rb_former_request,
                .bio_merge = dd_bio_merge,
                .request_merge = dd_request_merge,
                .requests_merged = dd_merged_requests,
                .request_merged = dd_request_merged,
                .has_work = dd_has_work,
                .init_sched = dd_init_sched,
                .exit_sched = dd_exit_sched,
                .init_hctx = dd_init_hctx,
        },

#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_alias = "deadline",
        .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
        .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

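/*
 * Illustrative usage: select this scheduler at runtime with
 *
 *   echo mq-deadline > /sys/block/<dev>/queue/scheduler
 */
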
static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");