// SPDX-License-Identifier: GPL-2.0
/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.allow_merge)
                return e->type->ops.allow_merge(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return false;

        if (!elv_iosched_allow_bio_merge(rq, bio))
                return false;

        return true;
}
EXPORT_SYMBOL(elv_bio_merge_ok);

static inline bool elv_support_features(struct request_queue *q,
                const struct elevator_type *e)
{
        return (q->required_elevator_features & e->elevator_features) ==
                q->required_elevator_features;
}
/**
 * elevator_match - Test an elevator name against @name
 * @e: Scheduler to test
 * @name: Elevator name to test
 *
 * Return true if the name or alias of elevator @e matches @name.
 */
static bool elevator_match(const struct elevator_type *e, const char *name)
{
        return !strcmp(e->elevator_name, name) ||
                (e->elevator_alias && !strcmp(e->elevator_alias, name));
}

static struct elevator_type *__elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list)
                if (elevator_match(e, name))
                        return e;
        return NULL;
}

static struct elevator_type *elevator_find_get(struct request_queue *q,
                const char *name)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);
        e = __elevator_find(name);
        if (e && (!elv_support_features(q, e) || !elevator_tryget(e)))
                e = NULL;
        spin_unlock(&elv_list_lock);
        return e;
}

static struct kobj_type elv_ktype;
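/*
 * Allocate and initialize an elevator_queue for @q. A reference is taken on
 * the elevator type @e; it is dropped again in elevator_release() once the
 * embedded kobject's last reference goes away.
 */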
struct elevator_queue *elevator_alloc(struct request_queue *q,
                                  struct elevator_type *e)
{
        struct elevator_queue *eq;

        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
                return NULL;

        __elevator_get(e);
        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);
        hash_init(eq->hash);

        return eq;
}
EXPORT_SYMBOL(elevator_alloc);

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e);
}
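/*
 * Tear down the I/O scheduler of @q: clear any io_cq state, free the
 * scheduler-owned requests and per-hctx data, and drop the final reference
 * on the elevator_queue kobject.
 */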
void elevator_exit(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;

        ioc_clear_queue(q);
        blk_mq_sched_free_rqs(q);

        mutex_lock(&e->sysfs_lock);
        blk_mq_exit_sched(q, e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}

static inline void __elv_rqhash_del(struct request *rq)
{
        hash_del(&rq->hash);
        rq->rq_flags &= ~RQF_HASHED;
}

void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}
EXPORT_SYMBOL_GPL(elv_rqhash_del);

void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
        rq->rq_flags |= RQF_HASHED;
}
EXPORT_SYMBOL_GPL(elv_rqhash_add);

void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}
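/*
 * Look up a request whose hash key matches @offset, i.e. a request ending
 * exactly at @offset, which makes it a back merge candidate. Entries that
 * are no longer mergeable are pruned from the hash as we go.
 */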
struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_node *next;
        struct request *rq;

        hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
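/*
 * Decide whether and how @bio can be merged into an existing request:
 * returns the merge type and, for anything but ELEVATOR_NO_MERGE, the
 * request to merge into via @req. The one-hit cache and the back merge
 * hash are tried before falling back to the scheduler's request_merge op.
 */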
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
                struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
                enum elv_merge ret = blk_try_merge(q->last_merge, bio);

                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
        if (__rq && elv_bio_merge_ok(__rq, bio)) {
                *req = __rq;

                if (blk_discard_mergable(__rq))
                        return ELEVATOR_DISCARD_MERGE;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->type->ops.request_merge)
                return e->type->ops.request_merge(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise. 'free' will contain all
 * requests that need to be freed.
 */
bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq,
                              struct list_head *free)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq)) {
                list_add(&rq->queuelist, free);
                return true;
        }

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                list_add(&rq->queuelist, free);
                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}
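/*
 * Called after a bio has been merged into @rq; @type says whether it was a
 * front or back merge. Lets the scheduler update its state, repositions the
 * request in the merge hash for back merges, and refreshes the one-hit cache.
 */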
void elv_merged_request(struct request_queue *q, struct request *rq,
                enum elv_merge type)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.request_merged)
                e->type->ops.request_merged(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                             struct request *next)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.requests_merged)
                e->type->ops.requests_merged(q, rq, next);

        elv_rqhash_reposition(q, rq);
        q->last_merge = rq;
}

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.next_request)
                return e->type->ops.next_request(q, rq);

        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.former_request)
                return e->type->ops.former_request(q, rq);

        return NULL;
}
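/*
 * sysfs glue: a scheduler's tunables are described by elv_fs_entry entries
 * and exposed under /sys/block/<dev>/queue/iosched/; show/store callbacks
 * are serialized by e->sysfs_lock.
 */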
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};

int elv_register_queue(struct request_queue *q, bool uevent)
{
        struct elevator_queue *e = q->elevator;
        int error;

        lockdep_assert_held(&q->sysfs_lock);

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                if (uevent)
                        kobject_uevent(&e->kobj, KOBJ_ADD);

                set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
        }
        return error;
}

void elv_unregister_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;

        lockdep_assert_held(&q->sysfs_lock);

        if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
        }
}

int elv_register(struct elevator_type *e)
{
        /* insert_requests and dispatch_request are mandatory */
        if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
                return -EINVAL;

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (__elevator_find(e->elevator_name)) {
                spin_unlock(&elv_list_lock);
                kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        printk(KERN_INFO "io scheduler %s registered\n", e->elevator_name);

        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);
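/*
 * Illustrative sketch only (names below are made up, not part of this file):
 * a scheduler module typically registers its elevator_type from module_init()
 * and unregisters it from module_exit(), e.g.:
 *
 *      static struct elevator_type my_sched = {
 *              .ops = {
 *                      .insert_requests       = my_insert_requests,
 *                      .dispatch_request       = my_dispatch_request,
 *              },
 *              .elevator_name  = "my-sched",
 *              .elevator_owner = THIS_MODULE,
 *      };
 *
 *      static int __init my_sched_init(void)
 *      {
 *              return elv_register(&my_sched);
 *      }
 *
 *      static void __exit my_sched_exit(void)
 *      {
 *              elv_unregister(&my_sched);
 *      }
 */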
void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);

static inline bool elv_support_iosched(struct request_queue *q)
{
        if (!queue_is_mq(q) ||
            (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
                return false;
        return true;
}

/*
 * For single queue devices, default to using mq-deadline. If we have multiple
 * queues or mq-deadline is not available, default to "none".
 */
static struct elevator_type *elevator_get_default(struct request_queue *q)
{
        if (q->tag_set && q->tag_set->flags & BLK_MQ_F_NO_SCHED_BY_DEFAULT)
                return NULL;

        if (q->nr_hw_queues != 1 &&
            !blk_mq_is_shared_tags(q->tag_set->flags))
                return NULL;

        return elevator_find_get(q, "mq-deadline");
}

/*
 * Get the first elevator providing the features required by the request queue.
 * Default to "none" if no matching elevator is found.
 */
static struct elevator_type *elevator_get_by_features(struct request_queue *q)
{
        struct elevator_type *e, *found = NULL;

        spin_lock(&elv_list_lock);

        list_for_each_entry(e, &elv_list, list) {
                if (elv_support_features(q, e)) {
                        found = e;
                        break;
                }
        }

        if (found && !elevator_tryget(found))
                found = NULL;

        spin_unlock(&elv_list_lock);
        return found;
}
/*
 * For a device queue that has no required features, use the default elevator
 * settings. Otherwise, use the first available elevator matching the required
 * features. If no suitable elevator is found or if the chosen elevator
 * initialization fails, fall back to the "none" elevator (no elevator).
 */
void elevator_init_mq(struct request_queue *q)
{
        struct elevator_type *e;
        int err;

        if (!elv_support_iosched(q))
                return;

        WARN_ON_ONCE(blk_queue_registered(q));

        if (unlikely(q->elevator))
                return;

        if (!q->required_elevator_features)
                e = elevator_get_default(q);
        else
                e = elevator_get_by_features(q);
        if (!e)
                return;

        /*
         * We are called before adding disk, when there isn't any FS I/O,
         * so freezing queue plus canceling dispatch work is enough to
         * drain any dispatch activities originated from passthrough
         * requests, then no need to quiesce queue which may add long boot
         * latency, especially when lots of disks are involved.
         */
        blk_mq_freeze_queue(q);
        blk_mq_cancel_work_sync(q);

        err = blk_mq_init_sched(q, e);

        blk_mq_unfreeze_queue(q);

        if (err) {
                pr_warn("\"%s\" elevator initialization failed, "
                        "falling back to \"none\"\n", e->elevator_name);
        }

        elevator_put(e);
}
/*
 * Switch to the new_e io scheduler. Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we need
 * for the new one. This way we have a chance of going back to the old one,
 * if the new one fails init for some reason.
 */
int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        int ret;

        lockdep_assert_held(&q->sysfs_lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        if (q->elevator) {
                elv_unregister_queue(q);
                elevator_exit(q);
        }

        ret = blk_mq_init_sched(q, new_e);
        if (ret)
                goto out_unfreeze;

        ret = elv_register_queue(q, true);
        if (ret) {
                elevator_exit(q);
                goto out_unfreeze;
        }
        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

out_unfreeze:
        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
        return ret;
}
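/*
 * Detach the current I/O scheduler from @q and switch the queue to "none":
 * the scheduler is torn down under a frozen and quiesced queue and the queue
 * depth is reset to the tag set's depth.
 */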
void elevator_disable(struct request_queue *q)
{
        lockdep_assert_held(&q->sysfs_lock);

        blk_mq_freeze_queue(q);
        blk_mq_quiesce_queue(q);

        elv_unregister_queue(q);
        elevator_exit(q);
        blk_queue_flag_clear(QUEUE_FLAG_SQ_SCHED, q);
        q->elevator = NULL;
        q->nr_requests = q->tag_set->queue_depth;
        blk_add_trace_msg(q, "elv switch: none");

        blk_mq_unquiesce_queue(q);
        blk_mq_unfreeze_queue(q);
}

/*
 * Switch this queue to the given IO scheduler.
 */
static int elevator_change(struct request_queue *q, const char *elevator_name)
{
        struct elevator_type *e;
        int ret;

        /* Make sure queue is not in the middle of being removed */
        if (!blk_queue_registered(q))
                return -ENOENT;

        /*
         * Special case for mq, turn off scheduling
         */
        if (!strncmp(elevator_name, "none", 4)) {
                if (q->elevator)
                        elevator_disable(q);
                return 0;
        }

        if (q->elevator && elevator_match(q->elevator->type, elevator_name))
                return 0;

        e = elevator_find_get(q, elevator_name);
        if (!e) {
                request_module("%s-iosched", elevator_name);
                e = elevator_find_get(q, elevator_name);
                if (!e)
                        return -EINVAL;
        }
        ret = elevator_switch(q, e);
        elevator_put(e);
        return ret;
}
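/*
 * "scheduler" queue attribute, store side: parse the scheduler name written
 * by the user and switch @q to it (or to "none") via elevator_change().
 */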
ssize_t elv_iosched_store(struct request_queue *q, const char *buf,
                          size_t count)
{
        char elevator_name[ELV_NAME_MAX];
        int ret;

        if (!elv_support_iosched(q))
                return count;

        strlcpy(elevator_name, buf, sizeof(elevator_name));
        ret = elevator_change(q, strstrip(elevator_name));
        if (!ret)
                return count;
        return ret;
}
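/*
 * "scheduler" queue attribute, show side: list every registered elevator
 * that supports @q's required features, with the active one in brackets,
 * plus "none".
 */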
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *eq = q->elevator;
        struct elevator_type *cur = NULL, *e;
        int len = 0;

        if (!elv_support_iosched(q))
                return sprintf(name, "none\n");

        if (!q->elevator)
                len += sprintf(name+len, "[none] ");
        else
                cur = eq->type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(e, &elv_list, list) {
                if (e == cur) {
                        len += sprintf(name+len, "[%s] ", cur->elevator_name);
                        continue;
                }
                if (elv_support_features(q, e))
                        len += sprintf(name+len, "%s ", e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        if (q->elevator)
                len += sprintf(name+len, "none");

        len += sprintf(len+name, "\n");
        return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);

static int __init elevator_setup(char *str)
{
        pr_warn("Kernel parameter elevator= does not have any effect anymore.\n"
                "Please use sysfs to set IO scheduler for individual devices.\n");
        return 1;
}

__setup("elevator=", elevator_setup);