1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
4  *  for the blk-mq scheduling framework
5  *
6  *  Copyright (C) 2016 Jens Axboe <axboe@kernel.dk>
7  */
8 #include <linux/kernel.h>
9 #include <linux/fs.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/init.h>
15 #include <linux/compiler.h>
16 #include <linux/rbtree.h>
17 #include <linux/sbitmap.h>
18
19 #include <trace/events/block.h>
20
21 #include "elevator.h"
22 #include "blk.h"
23 #include "blk-mq.h"
24 #include "blk-mq-debugfs.h"
25 #include "blk-mq-sched.h"
26
27 /*
28  * See Documentation/block/deadline-iosched.rst
29  */
30 static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
31 static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
32 /*
33  * Time after which to dispatch lower priority requests even if higher
34  * priority requests are pending.
35  */
36 static const int prio_aging_expire = 10 * HZ;
37 static const int writes_starved = 2;    /* max times reads can starve a write */
38 static const int fifo_batch = 16;       /* # of sequential requests treated as one
39                                      by the above parameters. For throughput. */
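/*
 * Illustrative defaults, assuming a CONFIG_HZ=1000 build (HZ is configuration
 * dependent): read_expire = 500 jiffies ~= 500 ms, write_expire ~= 5 s and
 * prio_aging_expire ~= 10 s. With fifo_batch = 16, up to 16 requests may be
 * dispatched in sector order before the FIFO deadlines are consulted again.
 * All of these can be changed at runtime via sysfs.
 */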
40
41 enum dd_data_dir {
42         DD_READ         = READ,
43         DD_WRITE        = WRITE,
44 };
45
46 enum { DD_DIR_COUNT = 2 };
47
48 enum dd_prio {
49         DD_RT_PRIO      = 0,
50         DD_BE_PRIO      = 1,
51         DD_IDLE_PRIO    = 2,
52         DD_PRIO_MAX     = 2,
53 };
54
55 enum { DD_PRIO_COUNT = 3 };
56
57 /*
58  * I/O statistics per I/O priority. It is fine if these counters overflow.
59  * What matters is that these counters are at least as wide as
60  * log2(max_outstanding_requests).
61  */
62 struct io_stats_per_prio {
63         uint32_t inserted;
64         uint32_t merged;
65         uint32_t dispatched;
66         atomic_t completed;
67 };
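/*
 * Example of why counter wrap-around is harmless: if 2^32 + 5 requests have
 * been inserted (stored as 5) and 2^32 - 2 have completed (stored as
 * 0xfffffffe), the unsigned subtraction 5 - 0xfffffffe in dd_queued() still
 * yields 7, the number of requests currently pending.
 */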
68
69 /*
70  * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
71  * present on both sort_list[] and fifo_list[].
72  */
73 struct dd_per_prio {
74         struct list_head dispatch;
75         struct rb_root sort_list[DD_DIR_COUNT];
76         struct list_head fifo_list[DD_DIR_COUNT];
77         /* Next request in sector-sorted (rbtree) order. Read, write or both may be NULL. */
78         struct request *next_rq[DD_DIR_COUNT];
79         struct io_stats_per_prio stats;
80 };
81
82 struct deadline_data {
83         /*
84          * run time data
85          */
86
87         struct dd_per_prio per_prio[DD_PRIO_COUNT];
88
89         /* Data direction of latest dispatched request. */
90         enum dd_data_dir last_dir;
91         unsigned int batching;          /* number of sequential requests made */
92         unsigned int starved;           /* times reads have starved writes */
93
94         /*
95          * settings that change how the i/o scheduler behaves
96          */
97         int fifo_expire[DD_DIR_COUNT];
98         int fifo_batch;
99         int writes_starved;
100         int front_merges;
101         u32 async_depth;
102         int prio_aging_expire;
103
104         spinlock_t lock;
105         spinlock_t zone_lock;
106 };
107
108 /* Maps an I/O priority class to a deadline scheduler priority. */
109 static const enum dd_prio ioprio_class_to_prio[] = {
110         [IOPRIO_CLASS_NONE]     = DD_BE_PRIO,
111         [IOPRIO_CLASS_RT]       = DD_RT_PRIO,
112         [IOPRIO_CLASS_BE]       = DD_BE_PRIO,
113         [IOPRIO_CLASS_IDLE]     = DD_IDLE_PRIO,
114 };
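/*
 * For example, a request submitted with ioprio IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4)
 * maps to DD_RT_PRIO; the priority level within the class (here 4) is not used
 * by this scheduler, only the class matters.
 */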
115
116 static inline struct rb_root *
117 deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
118 {
119         return &per_prio->sort_list[rq_data_dir(rq)];
120 }
121
122 /*
123  * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
124  * request.
125  */
126 static u8 dd_rq_ioclass(struct request *rq)
127 {
128         return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
129 }
130
131 /*
132  * get the request before `rq' in sector-sorted order
133  */
134 static inline struct request *
135 deadline_earlier_request(struct request *rq)
136 {
137         struct rb_node *node = rb_prev(&rq->rb_node);
138
139         if (node)
140                 return rb_entry_rq(node);
141
142         return NULL;
143 }
144
145 /*
146  * get the request after `rq' in sector-sorted order
147  */
148 static inline struct request *
149 deadline_latter_request(struct request *rq)
150 {
151         struct rb_node *node = rb_next(&rq->rb_node);
152
153         if (node)
154                 return rb_entry_rq(node);
155
156         return NULL;
157 }
158
159 static void
160 deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
161 {
162         struct rb_root *root = deadline_rb_root(per_prio, rq);
163
164         elv_rb_add(root, rq);
165 }
166
167 static inline void
168 deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
169 {
170         const enum dd_data_dir data_dir = rq_data_dir(rq);
171
172         if (per_prio->next_rq[data_dir] == rq)
173                 per_prio->next_rq[data_dir] = deadline_latter_request(rq);
174
175         elv_rb_del(deadline_rb_root(per_prio, rq), rq);
176 }
177
178 /*
179  * remove rq from rbtree and fifo.
180  */
181 static void deadline_remove_request(struct request_queue *q,
182                                     struct dd_per_prio *per_prio,
183                                     struct request *rq)
184 {
185         list_del_init(&rq->queuelist);
186
187         /*
188          * We might not be on the rbtree, if we are doing an insert merge
189          */
190         if (!RB_EMPTY_NODE(&rq->rb_node))
191                 deadline_del_rq_rb(per_prio, rq);
192
193         elv_rqhash_del(q, rq);
194         if (q->last_merge == rq)
195                 q->last_merge = NULL;
196 }
197
198 static void dd_request_merged(struct request_queue *q, struct request *req,
199                               enum elv_merge type)
200 {
201         struct deadline_data *dd = q->elevator->elevator_data;
202         const u8 ioprio_class = dd_rq_ioclass(req);
203         const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
204         struct dd_per_prio *per_prio = &dd->per_prio[prio];
205
206         /*
207          * if the merge was a front merge, we need to reposition request
208          */
209         if (type == ELEVATOR_FRONT_MERGE) {
210                 elv_rb_del(deadline_rb_root(per_prio, req), req);
211                 deadline_add_rq_rb(per_prio, req);
212         }
213 }
214
215 /*
216  * Callback function that is invoked after @next has been merged into @req.
217  */
218 static void dd_merged_requests(struct request_queue *q, struct request *req,
219                                struct request *next)
220 {
221         struct deadline_data *dd = q->elevator->elevator_data;
222         const u8 ioprio_class = dd_rq_ioclass(next);
223         const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
224
225         lockdep_assert_held(&dd->lock);
226
227         dd->per_prio[prio].stats.merged++;
228
229         /*
230          * If @next expires before @req, assign its expire time to @req and
231          * move @req into @next's position in the FIFO (@next will be deleted).
232          */
233         if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
234                 if (time_before((unsigned long)next->fifo_time,
235                                 (unsigned long)req->fifo_time)) {
236                         list_move(&req->queuelist, &next->queuelist);
237                         req->fifo_time = next->fifo_time;
238                 }
239         }
240
241         /*
242          * kill knowledge of next, this one is a goner
243          */
244         deadline_remove_request(q, &dd->per_prio[prio], next);
245 }
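/*
 * For example, if @next was inserted 300 ms before @req, @req inherits the
 * earlier fifo_time and takes over @next's slot in the FIFO, so the surviving
 * merged request keeps the stricter deadline of the two.
 */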
246
247 /*
248  * move an entry to dispatch queue
249  */
250 static void
251 deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
252                       struct request *rq)
253 {
254         const enum dd_data_dir data_dir = rq_data_dir(rq);
255
256         per_prio->next_rq[data_dir] = deadline_latter_request(rq);
257
258         /*
259          * take it off the sort and fifo list
260          */
261         deadline_remove_request(rq->q, per_prio, rq);
262 }
263
264 /* Number of requests queued for a given priority level. */
265 static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
266 {
267         const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
268
269         lockdep_assert_held(&dd->lock);
270
271         return stats->inserted - atomic_read(&stats->completed);
272 }
273
274 /*
275  * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
276  * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
277  */
278 static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
279                                       enum dd_data_dir data_dir)
280 {
281         struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
282
283         /*
284          * rq is expired!
285          */
286         if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
287                 return 1;
288
289         return 0;
290 }
291
292 /*
293  * Check if rq has a sequential request preceding it.
294  */
295 static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
296 {
297         struct request *prev = deadline_earlier_request(rq);
298
299         if (!prev)
300                 return false;
301
302         return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
303 }
304
305 /*
306  * Skip all write requests that are sequential from @rq, even if we cross
307  * a zone boundary.
308  */
309 static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
310                                                 struct request *rq)
311 {
312         sector_t pos = blk_rq_pos(rq);
313         sector_t skipped_sectors = 0;
314
315         while (rq) {
316                 if (blk_rq_pos(rq) != pos + skipped_sectors)
317                         break;
318                 skipped_sectors += blk_rq_sectors(rq);
319                 rq = deadline_latter_request(rq);
320         }
321
322         return rq;
323 }
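/*
 * For example, with queued writes at sectors 100 (8 sectors), 108 (16 sectors)
 * and 200, calling this on the first request skips the first two (100 + 8 ==
 * 108) and returns the request at sector 200, the first one that breaks the
 * sequential stream (or NULL if no such request exists).
 */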
324
325 /*
326  * For the specified data direction, return the next request to
327  * dispatch using arrival ordered lists.
328  */
329 static struct request *
330 deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
331                       enum dd_data_dir data_dir)
332 {
333         struct request *rq;
334         unsigned long flags;
335
336         if (list_empty(&per_prio->fifo_list[data_dir]))
337                 return NULL;
338
339         rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
340         if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
341                 return rq;
342
343         /*
344          * Look for a write request that can be dispatched, that is one with
345          * an unlocked target zone. For some HDDs, breaking a sequential
346          * write stream can lead to lower throughput, so make sure to preserve
347          * sequential write streams, even if that stream crosses into the next
348          * zones and these zones are unlocked.
349          */
350         spin_lock_irqsave(&dd->zone_lock, flags);
351         list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
352                 if (blk_req_can_dispatch_to_zone(rq) &&
353                     (blk_queue_nonrot(rq->q) ||
354                      !deadline_is_seq_write(dd, rq)))
355                         goto out;
356         }
357         rq = NULL;
358 out:
359         spin_unlock_irqrestore(&dd->zone_lock, flags);
360
361         return rq;
362 }
363
364 /*
365  * For the specified data direction, return the next request to
366  * dispatch using sector position sorted lists.
367  */
368 static struct request *
369 deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
370                       enum dd_data_dir data_dir)
371 {
372         struct request *rq;
373         unsigned long flags;
374
375         rq = per_prio->next_rq[data_dir];
376         if (!rq)
377                 return NULL;
378
379         if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
380                 return rq;
381
382         /*
383          * Look for a write request that can be dispatched, that is one with
384          * an unlocked target zone. For some HDDs, breaking a sequential
385          * write stream can lead to lower throughput, so make sure to preserve
386          * sequential write streams, even if that stream crosses into the next
387          * zones and these zones are unlocked.
388          */
389         spin_lock_irqsave(&dd->zone_lock, flags);
390         while (rq) {
391                 if (blk_req_can_dispatch_to_zone(rq))
392                         break;
393                 if (blk_queue_nonrot(rq->q))
394                         rq = deadline_latter_request(rq);
395                 else
396                         rq = deadline_skip_seq_writes(dd, rq);
397         }
398         spin_unlock_irqrestore(&dd->zone_lock, flags);
399
400         return rq;
401 }
402
403 /*
404  * Returns true if and only if @rq started after @latest_start where
405  * @latest_start is in jiffies.
406  */
407 static bool started_after(struct deadline_data *dd, struct request *rq,
408                           unsigned long latest_start)
409 {
410         unsigned long start_time = (unsigned long)rq->fifo_time;
411
412         start_time -= dd->fifo_expire[rq_data_dir(rq)];
413
414         return time_after(start_time, latest_start);
415 }
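/*
 * For requests inserted via the FIFO path, fifo_time is the insertion time
 * plus fifo_expire, so subtracting fifo_expire above recovers the insertion
 * time; the comparison therefore asks "was @rq inserted after @latest_start?".
 */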
416
417 /*
418  * __dd_dispatch_request() selects the best request according to
419  * read/write expire, fifo_batch, etc. and with a start time <= @latest_start.
420  */
421 static struct request *__dd_dispatch_request(struct deadline_data *dd,
422                                              struct dd_per_prio *per_prio,
423                                              unsigned long latest_start)
424 {
425         struct request *rq, *next_rq;
426         enum dd_data_dir data_dir;
427         enum dd_prio prio;
428         u8 ioprio_class;
429
430         lockdep_assert_held(&dd->lock);
431
432         if (!list_empty(&per_prio->dispatch)) {
433                 rq = list_first_entry(&per_prio->dispatch, struct request,
434                                       queuelist);
435                 if (started_after(dd, rq, latest_start))
436                         return NULL;
437                 list_del_init(&rq->queuelist);
438                 goto done;
439         }
440
441         /*
442          * batches are currently reads XOR writes
443          */
444         rq = deadline_next_request(dd, per_prio, dd->last_dir);
445         if (rq && dd->batching < dd->fifo_batch)
446                 /* we have a next request and are still entitled to batch */
447                 goto dispatch_request;
448
449         /*
450          * at this point we are not running a batch. select the appropriate
451          * data direction (read / write)
452          */
453
454         if (!list_empty(&per_prio->fifo_list[DD_READ])) {
455                 BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));
456
457                 if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
458                     (dd->starved++ >= dd->writes_starved))
459                         goto dispatch_writes;
460
461                 data_dir = DD_READ;
462
463                 goto dispatch_find_request;
464         }
465
466         /*
467          * there are either no reads queued or the writes have been starved long enough
468          */
469
470         if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
471 dispatch_writes:
472                 BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));
473
474                 dd->starved = 0;
475
476                 data_dir = DD_WRITE;
477
478                 goto dispatch_find_request;
479         }
480
481         return NULL;
482
483 dispatch_find_request:
484         /*
485          * we are not running a batch, find best request for selected data_dir
486          */
487         next_rq = deadline_next_request(dd, per_prio, data_dir);
488         if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
489                 /*
490                  * A deadline has expired, the last request was in the other
491                  * direction, or we have run out of higher-sectored requests.
492                  * Start again from the request with the earliest expiry time.
493                  */
494                 rq = deadline_fifo_request(dd, per_prio, data_dir);
495         } else {
496                 /*
497                  * The last req was the same dir and we have a next request in
498                  * sort order. No expired requests so continue on from here.
499                  */
500                 rq = next_rq;
501         }
502
503         /*
504          * For a zoned block device, if we only have writes queued and none of
505          * them can be dispatched, rq will be NULL.
506          */
507         if (!rq)
508                 return NULL;
509
510         dd->last_dir = data_dir;
511         dd->batching = 0;
512
513 dispatch_request:
514         if (started_after(dd, rq, latest_start))
515                 return NULL;
516
517         /*
518          * rq is the selected appropriate request.
519          */
520         dd->batching++;
521         deadline_move_request(dd, per_prio, rq);
522 done:
523         ioprio_class = dd_rq_ioclass(rq);
524         prio = ioprio_class_to_prio[ioprio_class];
525         dd->per_prio[prio].stats.dispatched++;
526         /*
527          * If the request needs its target zone locked, do it.
528          */
529         blk_req_zone_write_lock(rq);
530         rq->rq_flags |= RQF_STARTED;
531         return rq;
532 }
533
534 /*
535  * Check whether there are any requests with priority other than DD_RT_PRIO
536  * that were inserted more than prio_aging_expire jiffies ago.
537  */
538 static struct request *dd_dispatch_prio_aged_requests(struct deadline_data *dd,
539                                                       unsigned long now)
540 {
541         struct request *rq;
542         enum dd_prio prio;
543         int prio_cnt;
544
545         lockdep_assert_held(&dd->lock);
546
547         prio_cnt = !!dd_queued(dd, DD_RT_PRIO) + !!dd_queued(dd, DD_BE_PRIO) +
548                    !!dd_queued(dd, DD_IDLE_PRIO);
549         if (prio_cnt < 2)
550                 return NULL;
551
552         for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
553                 rq = __dd_dispatch_request(dd, &dd->per_prio[prio],
554                                            now - dd->prio_aging_expire);
555                 if (rq)
556                         return rq;
557         }
558
559         return NULL;
560 }
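/*
 * With the default prio_aging_expire of 10 * HZ, a BE or IDLE request is only
 * returned here if it was inserted at least ~10 seconds ago and requests are
 * currently queued at two or more priority levels.
 */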
561
562 /*
563  * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
564  *
565  * One confusing aspect here is that we get called for a specific
566  * hardware queue, but we may return a request that is for a
567  * different hardware queue. This is because mq-deadline has shared
568  * state for all hardware queues, in terms of sorting, FIFOs, etc.
569  */
570 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
571 {
572         struct deadline_data *dd = hctx->queue->elevator->elevator_data;
573         const unsigned long now = jiffies;
574         struct request *rq;
575         enum dd_prio prio;
576
577         spin_lock(&dd->lock);
578         rq = dd_dispatch_prio_aged_requests(dd, now);
579         if (rq)
580                 goto unlock;
581
582         /*
583          * Next, dispatch requests in priority order. Ignore lower priority
584          * requests if any higher priority requests are pending.
585          */
586         for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
587                 rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now);
588                 if (rq || dd_queued(dd, prio))
589                         break;
590         }
591
592 unlock:
593         spin_unlock(&dd->lock);
594
595         return rq;
596 }
597
598 /*
599  * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
600  * function is used by __blk_mq_get_tag().
601  */
602 static void dd_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
603 {
604         struct deadline_data *dd = data->q->elevator->elevator_data;
605
606         /* Do not throttle synchronous reads. */
607         if (op_is_sync(opf) && !op_is_write(opf))
608                 return;
609
610         /*
611          * Throttle asynchronous requests and writes such that these requests
612          * do not block the allocation of synchronous requests.
613          */
614         data->shallow_depth = dd->async_depth;
615 }
616
617 /* Called by blk_mq_update_nr_requests(). */
618 static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
619 {
620         struct request_queue *q = hctx->queue;
621         struct deadline_data *dd = q->elevator->elevator_data;
622         struct blk_mq_tags *tags = hctx->sched_tags;
623
624         dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
625
626         sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, dd->async_depth);
627 }
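/*
 * For example, with q->nr_requests == 64 the async_depth becomes 48, so
 * asynchronous requests and writes are limited to roughly three quarters of
 * the scheduler tags, leaving headroom for synchronous reads (see
 * dd_limit_depth()).
 */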
628
629 /* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
630 static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
631 {
632         dd_depth_updated(hctx);
633         return 0;
634 }
635
636 static void dd_exit_sched(struct elevator_queue *e)
637 {
638         struct deadline_data *dd = e->elevator_data;
639         enum dd_prio prio;
640
641         for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
642                 struct dd_per_prio *per_prio = &dd->per_prio[prio];
643                 const struct io_stats_per_prio *stats = &per_prio->stats;
644                 uint32_t queued;
645
646                 WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
647                 WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
648
649                 spin_lock(&dd->lock);
650                 queued = dd_queued(dd, prio);
651                 spin_unlock(&dd->lock);
652
653                 WARN_ONCE(queued != 0,
654                           "statistics for priority %d: i %u m %u d %u c %u\n",
655                           prio, stats->inserted, stats->merged,
656                           stats->dispatched, atomic_read(&stats->completed));
657         }
658
659         kfree(dd);
660 }
661
662 /*
663  * initialize elevator private data (deadline_data).
664  */
665 static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
666 {
667         struct deadline_data *dd;
668         struct elevator_queue *eq;
669         enum dd_prio prio;
670         int ret = -ENOMEM;
671
672         eq = elevator_alloc(q, e);
673         if (!eq)
674                 return ret;
675
676         dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
677         if (!dd)
678                 goto put_eq;
679
680         eq->elevator_data = dd;
681
682         for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
683                 struct dd_per_prio *per_prio = &dd->per_prio[prio];
684
685                 INIT_LIST_HEAD(&per_prio->dispatch);
686                 INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
687                 INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
688                 per_prio->sort_list[DD_READ] = RB_ROOT;
689                 per_prio->sort_list[DD_WRITE] = RB_ROOT;
690         }
691         dd->fifo_expire[DD_READ] = read_expire;
692         dd->fifo_expire[DD_WRITE] = write_expire;
693         dd->writes_starved = writes_starved;
694         dd->front_merges = 1;
695         dd->last_dir = DD_WRITE;
696         dd->fifo_batch = fifo_batch;
697         dd->prio_aging_expire = prio_aging_expire;
698         spin_lock_init(&dd->lock);
699         spin_lock_init(&dd->zone_lock);
700
701         /* Dispatching is done queue-wide rather than per hardware queue. */
702         blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
703
704         q->elevator = eq;
705         return 0;
706
707 put_eq:
708         kobject_put(&eq->kobj);
709         return ret;
710 }
711
712 /*
713  * Try to merge @bio into an existing request. If @bio has been merged into
714  * an existing request, store the pointer to that request into *@rq.
715  */
716 static int dd_request_merge(struct request_queue *q, struct request **rq,
717                             struct bio *bio)
718 {
719         struct deadline_data *dd = q->elevator->elevator_data;
720         const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
721         const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
722         struct dd_per_prio *per_prio = &dd->per_prio[prio];
723         sector_t sector = bio_end_sector(bio);
724         struct request *__rq;
725
726         if (!dd->front_merges)
727                 return ELEVATOR_NO_MERGE;
728
729         __rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
730         if (__rq) {
731                 BUG_ON(sector != blk_rq_pos(__rq));
732
733                 if (elv_bio_merge_ok(__rq, bio)) {
734                         *rq = __rq;
735                         if (blk_discard_mergable(__rq))
736                                 return ELEVATOR_DISCARD_MERGE;
737                         return ELEVATOR_FRONT_MERGE;
738                 }
739         }
740
741         return ELEVATOR_NO_MERGE;
742 }
743
744 /*
745  * Attempt to merge a bio into an existing request. This function is called
746  * before @bio is associated with a request.
747  */
748 static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
749                 unsigned int nr_segs)
750 {
751         struct deadline_data *dd = q->elevator->elevator_data;
752         struct request *free = NULL;
753         bool ret;
754
755         spin_lock(&dd->lock);
756         ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
757         spin_unlock(&dd->lock);
758
759         if (free)
760                 blk_mq_free_request(free);
761
762         return ret;
763 }
764
765 /*
766  * add rq to rbtree and fifo
767  */
768 static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
769                               blk_insert_t flags)
770 {
771         struct request_queue *q = hctx->queue;
772         struct deadline_data *dd = q->elevator->elevator_data;
773         const enum dd_data_dir data_dir = rq_data_dir(rq);
774         u16 ioprio = req_get_ioprio(rq);
775         u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
776         struct dd_per_prio *per_prio;
777         enum dd_prio prio;
778         LIST_HEAD(free);
779
780         lockdep_assert_held(&dd->lock);
781
782         /*
783          * This may be a requeue of a write request that has locked its
784          * target zone. If so, this releases the zone lock.
785          */
786         blk_req_zone_write_unlock(rq);
787
788         prio = ioprio_class_to_prio[ioprio_class];
789         per_prio = &dd->per_prio[prio];
790         if (!rq->elv.priv[0]) {
791                 per_prio->stats.inserted++;
792                 rq->elv.priv[0] = (void *)(uintptr_t)1;
793         }
794
795         if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
796                 blk_mq_free_requests(&free);
797                 return;
798         }
799
800         trace_block_rq_insert(rq);
801
802         if (flags & BLK_MQ_INSERT_AT_HEAD) {
803                 list_add(&rq->queuelist, &per_prio->dispatch);
804                 rq->fifo_time = jiffies;
805         } else {
806                 deadline_add_rq_rb(per_prio, rq);
807
808                 if (rq_mergeable(rq)) {
809                         elv_rqhash_add(q, rq);
810                         if (!q->last_merge)
811                                 q->last_merge = rq;
812                 }
813
814                 /*
815                  * set expire time and add to fifo list
816                  */
817                 rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
818                 list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
819         }
820 }
821
822 /*
823  * Called from blk_mq_insert_request() or blk_mq_dispatch_plug_list().
824  */
825 static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
826                                struct list_head *list,
827                                blk_insert_t flags)
828 {
829         struct request_queue *q = hctx->queue;
830         struct deadline_data *dd = q->elevator->elevator_data;
831
832         spin_lock(&dd->lock);
833         while (!list_empty(list)) {
834                 struct request *rq;
835
836                 rq = list_first_entry(list, struct request, queuelist);
837                 list_del_init(&rq->queuelist);
838                 dd_insert_request(hctx, rq, flags);
839         }
840         spin_unlock(&dd->lock);
841 }
842
843 /* Callback from inside blk_mq_rq_ctx_init(). */
844 static void dd_prepare_request(struct request *rq)
845 {
846         rq->elv.priv[0] = NULL;
847 }
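/*
 * rq->elv.priv[0] is used as a boolean marker: dd_insert_request() sets it
 * when it accounts a request as inserted, and dd_finish_request() checks it so
 * that requests which bypassed the I/O scheduler are not counted as completed.
 */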
848
849 static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
850 {
851         struct deadline_data *dd = hctx->queue->elevator->elevator_data;
852         enum dd_prio p;
853
854         for (p = 0; p <= DD_PRIO_MAX; p++)
855                 if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
856                         return true;
857
858         return false;
859 }
860
861 /*
862  * Callback from inside blk_mq_free_request().
863  *
864  * For zoned block devices, write unlock the target zone of
865  * completed write requests. Do this while holding the zone lock
866  * spinlock so that the zone is never unlocked while deadline_fifo_request()
867  * or deadline_next_request() are executing. This function is called for
868  * all requests, whether or not these requests complete successfully.
869  *
870  * For a zoned block device, __dd_dispatch_request() may have stopped
871  * dispatching requests if all the queued requests are write requests directed
872  * at zones that are already locked due to on-going write requests. To ensure
873  * write request dispatch progress in this case, mark the queue as needing a
874  * restart to ensure that the queue is run again after completion of the
875  * request and zones being unlocked.
876  */
877 static void dd_finish_request(struct request *rq)
878 {
879         struct request_queue *q = rq->q;
880         struct deadline_data *dd = q->elevator->elevator_data;
881         const u8 ioprio_class = dd_rq_ioclass(rq);
882         const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
883         struct dd_per_prio *per_prio = &dd->per_prio[prio];
884
885         /*
886          * The block layer core may call dd_finish_request() without having
887          * called dd_insert_requests(). Skip requests that bypassed I/O
888          * scheduling. See also blk_mq_request_bypass_insert().
889          */
890         if (!rq->elv.priv[0])
891                 return;
892
893         atomic_inc(&per_prio->stats.completed);
894
895         if (blk_queue_is_zoned(q)) {
896                 unsigned long flags;
897
898                 spin_lock_irqsave(&dd->zone_lock, flags);
899                 blk_req_zone_write_unlock(rq);
900                 spin_unlock_irqrestore(&dd->zone_lock, flags);
901
902                 if (dd_has_write_work(rq->mq_hctx))
903                         blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
904         }
905 }
906
907 static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
908 {
909         return !list_empty_careful(&per_prio->dispatch) ||
910                 !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
911                 !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
912 }
913
914 static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
915 {
916         struct deadline_data *dd = hctx->queue->elevator->elevator_data;
917         enum dd_prio prio;
918
919         for (prio = 0; prio <= DD_PRIO_MAX; prio++)
920                 if (dd_has_work_for_prio(&dd->per_prio[prio]))
921                         return true;
922
923         return false;
924 }
925
926 /*
927  * sysfs parts below
928  */
929 #define SHOW_INT(__FUNC, __VAR)                                         \
930 static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
931 {                                                                       \
932         struct deadline_data *dd = e->elevator_data;                    \
933                                                                         \
934         return sysfs_emit(page, "%d\n", __VAR);                         \
935 }
936 #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
937 SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
938 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
939 SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
940 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
941 SHOW_INT(deadline_front_merges_show, dd->front_merges);
942 SHOW_INT(deadline_async_depth_show, dd->async_depth);
943 SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
944 #undef SHOW_INT
945 #undef SHOW_JIFFIES
946
947 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
948 static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
949 {                                                                       \
950         struct deadline_data *dd = e->elevator_data;                    \
951         int __data, __ret;                                              \
952                                                                         \
953         __ret = kstrtoint(page, 0, &__data);                            \
954         if (__ret < 0)                                                  \
955                 return __ret;                                           \
956         if (__data < (MIN))                                             \
957                 __data = (MIN);                                         \
958         else if (__data > (MAX))                                        \
959                 __data = (MAX);                                         \
960         *(__PTR) = __CONV(__data);                                      \
961         return count;                                                   \
962 }
963 #define STORE_INT(__FUNC, __PTR, MIN, MAX)                              \
964         STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
965 #define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)                          \
966         STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
967 STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
968 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
969 STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
970 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
971 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
972 STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
973 STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
974 #undef STORE_FUNCTION
975 #undef STORE_INT
976 #undef STORE_JIFFIES
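/*
 * As a rough sketch, STORE_JIFFIES(deadline_read_expire_store, ...) above
 * expands to:
 *
 *	static ssize_t deadline_read_expire_store(struct elevator_queue *e,
 *						  const char *page, size_t count)
 *	{
 *		struct deadline_data *dd = e->elevator_data;
 *		int __data, __ret;
 *
 *		__ret = kstrtoint(page, 0, &__data);
 *		if (__ret < 0)
 *			return __ret;
 *		if (__data < 0)
 *			__data = 0;
 *		else if (__data > INT_MAX)
 *			__data = INT_MAX;
 *		*(&dd->fifo_expire[DD_READ]) = msecs_to_jiffies(__data);
 *		return count;
 *	}
 *
 * i.e. the sysfs value is taken in milliseconds, clamped, and stored in
 * jiffies.
 */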
977
978 #define DD_ATTR(name) \
979         __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
980
981 static struct elv_fs_entry deadline_attrs[] = {
982         DD_ATTR(read_expire),
983         DD_ATTR(write_expire),
984         DD_ATTR(writes_starved),
985         DD_ATTR(front_merges),
986         DD_ATTR(async_depth),
987         DD_ATTR(fifo_batch),
988         DD_ATTR(prio_aging_expire),
989         __ATTR_NULL
990 };
991
992 #ifdef CONFIG_BLK_DEBUG_FS
993 #define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)               \
994 static void *deadline_##name##_fifo_start(struct seq_file *m,           \
995                                           loff_t *pos)                  \
996         __acquires(&dd->lock)                                           \
997 {                                                                       \
998         struct request_queue *q = m->private;                           \
999         struct deadline_data *dd = q->elevator->elevator_data;          \
1000         struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
1001                                                                         \
1002         spin_lock(&dd->lock);                                           \
1003         return seq_list_start(&per_prio->fifo_list[data_dir], *pos);    \
1004 }                                                                       \
1005                                                                         \
1006 static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
1007                                          loff_t *pos)                   \
1008 {                                                                       \
1009         struct request_queue *q = m->private;                           \
1010         struct deadline_data *dd = q->elevator->elevator_data;          \
1011         struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
1012                                                                         \
1013         return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);   \
1014 }                                                                       \
1015                                                                         \
1016 static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
1017         __releases(&dd->lock)                                           \
1018 {                                                                       \
1019         struct request_queue *q = m->private;                           \
1020         struct deadline_data *dd = q->elevator->elevator_data;          \
1021                                                                         \
1022         spin_unlock(&dd->lock);                                         \
1023 }                                                                       \
1024                                                                         \
1025 static const struct seq_operations deadline_##name##_fifo_seq_ops = {   \
1026         .start  = deadline_##name##_fifo_start,                         \
1027         .next   = deadline_##name##_fifo_next,                          \
1028         .stop   = deadline_##name##_fifo_stop,                          \
1029         .show   = blk_mq_debugfs_rq_show,                               \
1030 };                                                                      \
1031                                                                         \
1032 static int deadline_##name##_next_rq_show(void *data,                   \
1033                                           struct seq_file *m)           \
1034 {                                                                       \
1035         struct request_queue *q = data;                                 \
1036         struct deadline_data *dd = q->elevator->elevator_data;          \
1037         struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
1038         struct request *rq = per_prio->next_rq[data_dir];               \
1039                                                                         \
1040         if (rq)                                                         \
1041                 __blk_mq_debugfs_rq_show(m, rq);                        \
1042         return 0;                                                       \
1043 }
1044
1045 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
1046 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
1047 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
1048 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
1049 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
1050 DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
1051 #undef DEADLINE_DEBUGFS_DDIR_ATTRS
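/*
 * The six instantiations above generate seq_file hooks such as
 * deadline_read0_fifo_start() and deadline_write2_next_rq_show(); the numeric
 * suffix is the priority level (0 = RT, 1 = BE, 2 = IDLE) and matches the
 * "<name>_fifo_list" and "<name>_next_rq" debugfs entries registered in
 * deadline_queue_debugfs_attrs[] below.
 */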
1052
1053 static int deadline_batching_show(void *data, struct seq_file *m)
1054 {
1055         struct request_queue *q = data;
1056         struct deadline_data *dd = q->elevator->elevator_data;
1057
1058         seq_printf(m, "%u\n", dd->batching);
1059         return 0;
1060 }
1061
1062 static int deadline_starved_show(void *data, struct seq_file *m)
1063 {
1064         struct request_queue *q = data;
1065         struct deadline_data *dd = q->elevator->elevator_data;
1066
1067         seq_printf(m, "%u\n", dd->starved);
1068         return 0;
1069 }
1070
1071 static int dd_async_depth_show(void *data, struct seq_file *m)
1072 {
1073         struct request_queue *q = data;
1074         struct deadline_data *dd = q->elevator->elevator_data;
1075
1076         seq_printf(m, "%u\n", dd->async_depth);
1077         return 0;
1078 }
1079
1080 static int dd_queued_show(void *data, struct seq_file *m)
1081 {
1082         struct request_queue *q = data;
1083         struct deadline_data *dd = q->elevator->elevator_data;
1084         u32 rt, be, idle;
1085
1086         spin_lock(&dd->lock);
1087         rt = dd_queued(dd, DD_RT_PRIO);
1088         be = dd_queued(dd, DD_BE_PRIO);
1089         idle = dd_queued(dd, DD_IDLE_PRIO);
1090         spin_unlock(&dd->lock);
1091
1092         seq_printf(m, "%u %u %u\n", rt, be, idle);
1093
1094         return 0;
1095 }
1096
1097 /* Number of requests owned by the block driver for a given priority. */
1098 static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
1099 {
1100         const struct io_stats_per_prio *stats = &dd->per_prio[prio].stats;
1101
1102         lockdep_assert_held(&dd->lock);
1103
1104         return stats->dispatched + stats->merged -
1105                 atomic_read(&stats->completed);
1106 }
1107
1108 static int dd_owned_by_driver_show(void *data, struct seq_file *m)
1109 {
1110         struct request_queue *q = data;
1111         struct deadline_data *dd = q->elevator->elevator_data;
1112         u32 rt, be, idle;
1113
1114         spin_lock(&dd->lock);
1115         rt = dd_owned_by_driver(dd, DD_RT_PRIO);
1116         be = dd_owned_by_driver(dd, DD_BE_PRIO);
1117         idle = dd_owned_by_driver(dd, DD_IDLE_PRIO);
1118         spin_unlock(&dd->lock);
1119
1120         seq_printf(m, "%u %u %u\n", rt, be, idle);
1121
1122         return 0;
1123 }
1124
1125 #define DEADLINE_DISPATCH_ATTR(prio)                                    \
1126 static void *deadline_dispatch##prio##_start(struct seq_file *m,        \
1127                                              loff_t *pos)               \
1128         __acquires(&dd->lock)                                           \
1129 {                                                                       \
1130         struct request_queue *q = m->private;                           \
1131         struct deadline_data *dd = q->elevator->elevator_data;          \
1132         struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
1133                                                                         \
1134         spin_lock(&dd->lock);                                           \
1135         return seq_list_start(&per_prio->dispatch, *pos);               \
1136 }                                                                       \
1137                                                                         \
1138 static void *deadline_dispatch##prio##_next(struct seq_file *m,         \
1139                                             void *v, loff_t *pos)       \
1140 {                                                                       \
1141         struct request_queue *q = m->private;                           \
1142         struct deadline_data *dd = q->elevator->elevator_data;          \
1143         struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
1144                                                                         \
1145         return seq_list_next(v, &per_prio->dispatch, pos);              \
1146 }                                                                       \
1147                                                                         \
1148 static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v) \
1149         __releases(&dd->lock)                                           \
1150 {                                                                       \
1151         struct request_queue *q = m->private;                           \
1152         struct deadline_data *dd = q->elevator->elevator_data;          \
1153                                                                         \
1154         spin_unlock(&dd->lock);                                         \
1155 }                                                                       \
1156                                                                         \
1157 static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
1158         .start  = deadline_dispatch##prio##_start,                      \
1159         .next   = deadline_dispatch##prio##_next,                       \
1160         .stop   = deadline_dispatch##prio##_stop,                       \
1161         .show   = blk_mq_debugfs_rq_show,                               \
1162 }
1163
1164 DEADLINE_DISPATCH_ATTR(0);
1165 DEADLINE_DISPATCH_ATTR(1);
1166 DEADLINE_DISPATCH_ATTR(2);
1167 #undef DEADLINE_DISPATCH_ATTR
1168
1169 #define DEADLINE_QUEUE_DDIR_ATTRS(name)                                 \
1170         {#name "_fifo_list", 0400,                                      \
1171                         .seq_ops = &deadline_##name##_fifo_seq_ops}
1172 #define DEADLINE_NEXT_RQ_ATTR(name)                                     \
1173         {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
1174 static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
1175         DEADLINE_QUEUE_DDIR_ATTRS(read0),
1176         DEADLINE_QUEUE_DDIR_ATTRS(write0),
1177         DEADLINE_QUEUE_DDIR_ATTRS(read1),
1178         DEADLINE_QUEUE_DDIR_ATTRS(write1),
1179         DEADLINE_QUEUE_DDIR_ATTRS(read2),
1180         DEADLINE_QUEUE_DDIR_ATTRS(write2),
1181         DEADLINE_NEXT_RQ_ATTR(read0),
1182         DEADLINE_NEXT_RQ_ATTR(write0),
1183         DEADLINE_NEXT_RQ_ATTR(read1),
1184         DEADLINE_NEXT_RQ_ATTR(write1),
1185         DEADLINE_NEXT_RQ_ATTR(read2),
1186         DEADLINE_NEXT_RQ_ATTR(write2),
1187         {"batching", 0400, deadline_batching_show},
1188         {"starved", 0400, deadline_starved_show},
1189         {"async_depth", 0400, dd_async_depth_show},
1190         {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
1191         {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
1192         {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
1193         {"owned_by_driver", 0400, dd_owned_by_driver_show},
1194         {"queued", 0400, dd_queued_show},
1195         {},
1196 };
1197 #undef DEADLINE_QUEUE_DDIR_ATTRS
1198 #endif
1199
1200 static struct elevator_type mq_deadline = {
1201         .ops = {
1202                 .depth_updated          = dd_depth_updated,
1203                 .limit_depth            = dd_limit_depth,
1204                 .insert_requests        = dd_insert_requests,
1205                 .dispatch_request       = dd_dispatch_request,
1206                 .prepare_request        = dd_prepare_request,
1207                 .finish_request         = dd_finish_request,
1208                 .next_request           = elv_rb_latter_request,
1209                 .former_request         = elv_rb_former_request,
1210                 .bio_merge              = dd_bio_merge,
1211                 .request_merge          = dd_request_merge,
1212                 .requests_merged        = dd_merged_requests,
1213                 .request_merged         = dd_request_merged,
1214                 .has_work               = dd_has_work,
1215                 .init_sched             = dd_init_sched,
1216                 .exit_sched             = dd_exit_sched,
1217                 .init_hctx              = dd_init_hctx,
1218         },
1219
1220 #ifdef CONFIG_BLK_DEBUG_FS
1221         .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
1222 #endif
1223         .elevator_attrs = deadline_attrs,
1224         .elevator_name = "mq-deadline",
1225         .elevator_alias = "deadline",
1226         .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
1227         .elevator_owner = THIS_MODULE,
1228 };
1229 MODULE_ALIAS("mq-deadline-iosched");
1230
1231 static int __init deadline_init(void)
1232 {
1233         return elv_register(&mq_deadline);
1234 }
1235
1236 static void __exit deadline_exit(void)
1237 {
1238         elv_unregister(&mq_deadline);
1239 }
1240
1241 module_init(deadline_init);
1242 module_exit(deadline_exit);
1243
1244 MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
1245 MODULE_LICENSE("GPL");
1246 MODULE_DESCRIPTION("MQ deadline IO scheduler");