// SPDX-License-Identifier: GPL-2.0
/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is correct now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
/* Holding context data for changing elevator */
struct elv_change_ctx {
	const char *name;
	bool no_uevent;

	/* for unregistering old elevator */
	struct elevator_queue *old;
	/* for registering new elevator */
	struct elevator_queue *new;
};

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
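/*
 * Note: rq_hash_key() is the sector immediately after a request, so a
 * hash lookup keyed on a bio's start sector finds requests the bio could
 * be appended to (back-merge candidates). Illustrative example: a request
 * starting at sector 100 spanning 8 sectors hashes under key 108, which
 * is exactly where a contiguous back-mergeable bio would begin.
 */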
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static bool elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return true;
}
/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_bio_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);
/**
 * elevator_match - Check whether @e's name or alias matches @name
 * @e: Scheduler to test
 * @name: Elevator name to test
 *
 * Return true if the elevator @e's name or alias matches @name.
 */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
	return !strcmp(e->elevator_name, name) ||
		(e->elevator_alias && !strcmp(e->elevator_alias, name));
}
static struct elevator_type *__elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list)
		if (elevator_match(e, name))
			return e;
	return NULL;
}

static struct elevator_type *elevator_find_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);
	e = __elevator_find(name);
	if (e && !elevator_tryget(e))
		e = NULL;
	spin_unlock(&elv_list_lock);
	return e;
}
static const struct kobj_type elv_ktype;

struct elevator_queue *elevator_alloc(struct request_queue *q,
				      struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
	if (unlikely(!eq))
		return NULL;

	__elevator_get(e);
	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}
static void elevator_exit(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	lockdep_assert_held(&q->elevator_lock);

	ioc_clear_queue(q);
	blk_mq_sched_free_rqs(q);

	mutex_lock(&e->sysfs_lock);
	blk_mq_exit_sched(q, e);
	mutex_unlock(&e->sysfs_lock);
}
static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
	rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
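/*
 * Note that elv_rqhash_find() prunes opportunistically: any request found
 * to be no longer mergeable is dropped from the hash during the lookup,
 * which keeps later walks of that bucket short.
 */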
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);
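/*
 * Requests with equal start sectors deliberately descend to the right, so
 * an in-order walk of the tree yields requests in ascending sector order,
 * with equal-sector requests kept in insertion order.
 */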
void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
		struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;

		if (blk_discard_mergable(__rq))
			return ELEVATOR_DISCARD_MERGE;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
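/*
 * Illustrative example of the decision ladder above: for a queued request
 * covering sectors 100-107, a bio starting at sector 108 is found by the
 * rqhash lookup and reported as ELEVATOR_BACK_MERGE, while a bio ending
 * at sector 100 is typically only found by the scheduler's own
 * request_merge() lookup (e.g. a sorted RB tree) as a front merge, unless
 * q->last_merge already happens to point at the right request.
 */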
/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
			      struct list_head *free)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
		list_add(&rq->queuelist, free);
		return true;
	}

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		list_add(&rq->queuelist, free);
		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}
void elv_merged_request(struct request_queue *q, struct request *rq,
		enum elv_merge type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.request_merged)
		e->type->ops.request_merged(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}
void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.requests_merged)
		e->type->ops.requests_merged(q, rq, next);

	elv_rqhash_reposition(q, rq);
	q->last_merge = rq;
}
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.next_request)
		return e->type->ops.next_request(q, rq);

	return NULL;
}
struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.former_request)
		return e->type->ops.former_request(q, rq);

	return NULL;
}
#define to_elv(atr) container_of_const((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	const struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error = -ENODEV;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
		error = entry->show(e, page);
	mutex_unlock(&e->sysfs_lock);
	return error;
}
static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	const struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error = -ENODEV;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	if (!test_bit(ELEVATOR_FLAG_DYING, &e->flags))
		error = entry->store(e, page, length);
	mutex_unlock(&e->sysfs_lock);
	return error;
}
static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static const struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
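/*
 * Lifetime note: elevator_queue objects are refcounted through their
 * embedded kobject. The final kobject_put() invokes elevator_release()
 * above, which drops the reference on the scheduler module via
 * elevator_put() and then frees the structure.
 */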
static int elv_register_queue(struct request_queue *q,
			      struct elevator_queue *e,
			      bool uevent)
{
	int error;

	error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
	if (!error) {
		const struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		if (uevent)
			kobject_uevent(&e->kobj, KOBJ_ADD);

		/*
		 * The scheduler is initialized and ready to be exported
		 * via debugfs.
		 */
		blk_mq_sched_reg_debugfs(q);
		set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
	}
	return error;
}
static void elv_unregister_queue(struct request_queue *q,
				 struct elevator_queue *e)
{
	if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);

		/* unexport via debugfs before exiting the scheduler */
		blk_mq_sched_unreg_debugfs(q);
	}
}
int elv_register(struct elevator_type *e)
{
	/* finish_request is mandatory */
	if (WARN_ON_ONCE(!e->ops.finish_request))
		return -EINVAL;
	/* insert_requests and dispatch_request are mandatory */
	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
		return -EINVAL;

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (__elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
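/*
 * Illustrative sketch (hypothetical names): a scheduler module registers
 * an elevator_type from its module init, supplying at least the mandatory
 * ops checked above, e.g.
 *
 *	static struct elevator_type my_sched = {
 *		.ops = {
 *			.insert_requests	= my_insert_requests,
 *			.dispatch_request	= my_dispatch_request,
 *			.finish_request		= my_finish_request,
 *		},
 *		.elevator_name	= "my-sched",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init my_sched_init(void)
 *	{
 *		return elv_register(&my_sched);
 *	}
 *
 * Real schedulers (mq-deadline, bfq, kyber) also provide sysfs attributes
 * and init/exit hooks; the my_* callbacks here are placeholders.
 */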
void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists. icq's are RCU managed. Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);
/*
 * Switch to the io scheduler named in ctx->name.
 *
 * If switching fails, we are most likely out of memory and unable to
 * restore the old io scheduler, so the io scheduler is left as "none".
 */
static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
{
	struct elevator_type *new_e = NULL;
	int ret = 0;

	WARN_ON_ONCE(q->mq_freeze_depth == 0);
	lockdep_assert_held(&q->elevator_lock);

	if (strncmp(ctx->name, "none", 4)) {
		new_e = elevator_find_get(ctx->name);
		if (!new_e)
			return -EINVAL;
	}

	blk_mq_quiesce_queue(q);

	if (q->elevator) {
		ctx->old = q->elevator;
		elevator_exit(q);
	}

	if (new_e) {
		ret = blk_mq_init_sched(q, new_e);
		if (ret)
			goto out_unfreeze;
		ctx->new = q->elevator;
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
		q->elevator = NULL;
		q->nr_requests = q->tag_set->queue_depth;
	}
	blk_add_trace_msg(q, "elv switch: %s", ctx->name);

out_unfreeze:
	blk_mq_unquiesce_queue(q);

	if (ret) {
		pr_warn("elv: switch to \"%s\" failed, falling back to \"none\"\n",
			new_e->elevator_name);
	}

	if (new_e)
		elevator_put(new_e);
	return ret;
}
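/*
 * Note the split responsibility: elevator_switch() runs with the queue
 * frozen and q->elevator_lock held, and only records the outgoing and
 * incoming elevator_queue in ctx; the sysfs/debugfs unregister and
 * register steps are performed later by elevator_change_done(), after
 * the lock has been dropped.
 */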
static void elv_exit_and_release(struct request_queue *q)
{
	struct elevator_queue *e;
	unsigned memflags;

	memflags = blk_mq_freeze_queue(q);
	mutex_lock(&q->elevator_lock);
	e = q->elevator;
	elevator_exit(q);
	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);
	if (e)
		kobject_put(&e->kobj);
}
static int elevator_change_done(struct request_queue *q,
				struct elv_change_ctx *ctx)
{
	int ret = 0;

	if (ctx->old) {
		bool enable_wbt = test_bit(ELEVATOR_FLAG_ENABLE_WBT_ON_EXIT,
					   &ctx->old->flags);

		elv_unregister_queue(q, ctx->old);
		kobject_put(&ctx->old->kobj);
		if (enable_wbt)
			wbt_enable_default(q->disk);
	}
	if (ctx->new) {
		ret = elv_register_queue(q, ctx->new, !ctx->no_uevent);
		if (ret)
			elv_exit_and_release(q);
	}
	return ret;
}
/*
 * Switch this queue to the given IO scheduler.
 */
static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
{
	unsigned int memflags;
	int ret = 0;

	lockdep_assert_held(&q->tag_set->update_nr_hwq_lock);

	memflags = blk_mq_freeze_queue(q);
	/*
	 * May be called before the disk is added, when there isn't any FS
	 * I/O; freezing the queue plus canceling dispatch work is then
	 * enough to drain any dispatch activity originating from
	 * passthrough requests, so there is no need to quiesce the queue,
	 * which could add long boot latency, especially when many disks
	 * are involved.
	 *
	 * The disk isn't added yet, so the queue lock can only be verified
	 * manually.
	 */
	blk_mq_cancel_work_sync(q);
	mutex_lock(&q->elevator_lock);
	if (!(q->elevator && elevator_match(q->elevator->type, ctx->name)))
		ret = elevator_switch(q, ctx);
	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);
	if (!ret)
		ret = elevator_change_done(q, ctx);

	return ret;
}
/*
 * The I/O scheduler depends on the number of hardware queues; this forces
 * a reattachment when nr_hw_queues changes.
 */
void elv_update_nr_hw_queues(struct request_queue *q)
{
	struct elv_change_ctx ctx = {};
	int ret = -ENODEV;

	WARN_ON_ONCE(q->mq_freeze_depth == 0);

	mutex_lock(&q->elevator_lock);
	if (q->elevator && !blk_queue_dying(q) && blk_queue_registered(q)) {
		ctx.name = q->elevator->type->elevator_name;

		/* force the elevator to be reattached after nr_hw_queues is updated */
		ret = elevator_switch(q, &ctx);
	}
	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue_nomemrestore(q);
	if (!ret)
		WARN_ON_ONCE(elevator_change_done(q, &ctx));
}
/*
 * Use the default elevator settings. If the chosen elevator initialization
 * fails, fall back to the "none" elevator (no elevator).
 */
void elevator_set_default(struct request_queue *q)
{
	struct elv_change_ctx ctx = {
		.name		= "mq-deadline",
		.no_uevent	= true,
	};
	int err = 0;

	/* switching the elevator is allowed from now on */
	blk_queue_flag_clear(QUEUE_FLAG_NO_ELV_SWITCH, q);

	if (q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
		return;

	/*
	 * For single queue devices, default to using mq-deadline. If we
	 * have multiple queues or mq-deadline is not available, default
	 * to "none".
	 */
	if (elevator_find_get(ctx.name) && (q->nr_hw_queues == 1 ||
			blk_mq_is_shared_tags(q->tag_set->flags)))
		err = elevator_change(q, &ctx);
	if (err < 0)
		pr_warn("\"%s\" elevator initialization failed %d, "
			"falling back to \"none\"\n", ctx.name, err);
}
void elevator_set_none(struct request_queue *q)
{
	struct elv_change_ctx ctx = {
		.name	= "none",
	};
	int err;

	err = elevator_change(q, &ctx);
	if (err < 0)
		pr_warn("%s: set none elevator failed %d\n", __func__, err);
}
static void elv_iosched_load_module(const char *elevator_name)
{
	struct elevator_type *found;

	spin_lock(&elv_list_lock);
	found = __elevator_find(elevator_name);
	spin_unlock(&elv_list_lock);

	if (!found)
		request_module("%s-iosched", elevator_name);
}
ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elv_change_ctx ctx = {};
	int ret;
	struct request_queue *q = disk->queue;
	struct blk_mq_tag_set *set = q->tag_set;

	/* Make sure queue is not in the middle of being removed */
	if (!blk_queue_registered(q))
		return -ENOENT;

	/*
	 * If the attribute needs to load a module, do it before freezing the
	 * queue to ensure that the module file can be read when the request
	 * queue is the one for the device storing the module file.
	 */
	strscpy(elevator_name, buf, sizeof(elevator_name));
	ctx.name = strstrip(elevator_name);

	elv_iosched_load_module(ctx.name);

	down_read(&set->update_nr_hwq_lock);
	if (!blk_queue_no_elv_switch(q)) {
		ret = elevator_change(q, &ctx);
		if (!ret)
			ret = count;
	} else {
		ret = -ENOENT;
	}
	up_read(&set->update_nr_hwq_lock);
	return ret;
}
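/*
 * Example (illustrative): this store method backs the per-disk sysfs file
 * /sys/block/<dev>/queue/scheduler, so switching from userspace looks like:
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	none [mq-deadline] kyber bfq
 *	# echo kyber > /sys/block/sda/queue/scheduler
 *
 * The advertised list depends on which schedulers are built in or loaded;
 * "sda" is a placeholder device name.
 */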
ssize_t elv_iosched_show(struct gendisk *disk, char *name)
{
	struct request_queue *q = disk->queue;
	struct elevator_type *cur = NULL, *e;
	int len = 0;

	mutex_lock(&q->elevator_lock);
	if (!q->elevator) {
		len += sprintf(name+len, "[none] ");
	} else {
		len += sprintf(name+len, "none ");
		cur = q->elevator->type;
	}

	spin_lock(&elv_list_lock);
	list_for_each_entry(e, &elv_list, list) {
		if (e == cur)
			len += sprintf(name+len, "[%s] ", e->elevator_name);
		else
			len += sprintf(name+len, "%s ", e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	mutex_unlock(&q->elevator_lock);

	return len;
}
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);
static int __init elevator_setup(char *str)
{
	pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
		"Please use sysfs to set IO scheduler for individual devices.\n");
	return 1;
}

__setup("elevator=", elevator_setup);