/*
 * The Kyber I/O scheduler. Controls latency by throttling queue depths using
 * scalable techniques.
 *
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/module.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-stat.h"

/* Scheduling domains. */
enum {
	KYBER_READ,
	KYBER_SYNC_WRITE,
	KYBER_OTHER, /* Async writes, discard, etc. */
	KYBER_NUM_DOMAINS,
};

enum {
	KYBER_MIN_DEPTH = 256,

	/*
	 * In order to prevent starvation of synchronous requests by a flood of
	 * asynchronous requests, we reserve 25% of requests for synchronous
	 * operations.
	 */
	KYBER_ASYNC_PERCENT = 75,
};

/*
 * Initial device-wide depths for each scheduling domain.
 *
 * Even for fast devices with lots of tags like NVMe, you can saturate
 * the device with only a fraction of the maximum possible queue depth.
 * So, we cap these to a reasonable value.
 */
static const unsigned int kyber_depth[] = {
	[KYBER_READ] = 256,
	[KYBER_SYNC_WRITE] = 128,
	[KYBER_OTHER] = 64,
};

/*
 * Scheduling domain batch sizes. We favor reads.
 */
static const unsigned int kyber_batch_size[] = {
	[KYBER_READ] = 16,
	[KYBER_SYNC_WRITE] = 8,
	[KYBER_OTHER] = 8,
};

74 | ||
75 | struct kyber_queue_data { | |
76 | struct request_queue *q; | |
77 | ||
78 | struct blk_stat_callback *cb; | |
79 | ||
80 | /* | |
81 | * The device is divided into multiple scheduling domains based on the | |
82 | * request type. Each domain has a fixed number of in-flight requests of | |
83 | * that type device-wide, limited by these tokens. | |
84 | */ | |
85 | struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS]; | |
86 | ||
87 | /* | |
88 | * Async request percentage, converted to per-word depth for | |
89 | * sbitmap_get_shallow(). | |
90 | */ | |
91 | unsigned int async_depth; | |
92 | ||
93 | /* Target latencies in nanoseconds. */ | |
94 | u64 read_lat_nsec, write_lat_nsec; | |
95 | }; | |
96 | ||
97 | struct kyber_hctx_data { | |
98 | spinlock_t lock; | |
99 | struct list_head rqs[KYBER_NUM_DOMAINS]; | |
100 | unsigned int cur_domain; | |
101 | unsigned int batching; | |
ac6424b9 | 102 | wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS]; |
fcf38cdf | 103 | struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS]; |
00e04393 OS |
104 | atomic_t wait_index[KYBER_NUM_DOMAINS]; |
105 | }; | |
106 | ||
static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key);

static int rq_sched_domain(const struct request *rq)
{
	unsigned int op = rq->cmd_flags;

	if ((op & REQ_OP_MASK) == REQ_OP_READ)
		return KYBER_READ;
	else if ((op & REQ_OP_MASK) == REQ_OP_WRITE && op_is_sync(op))
		return KYBER_SYNC_WRITE;
	else
		return KYBER_OTHER;
}

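/*
 * Latency status of a scheduling domain relative to its target: GREAT/GOOD
 * mean the mean latency was at or under the target (GREAT if under half of
 * it), BAD/AWFUL mean it was over the target (AWFUL if at least twice it),
 * and NONE means there were no samples in the last window.
 */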
enum {
	NONE = 0,
	GOOD = 1,
	GREAT = 2,
	BAD = -1,
	AWFUL = -2,
};

#define IS_GOOD(status) ((status) > 0)
#define IS_BAD(status) ((status) < 0)

static int kyber_lat_status(struct blk_stat_callback *cb,
			    unsigned int sched_domain, u64 target)
{
	u64 latency;

	if (!cb->stat[sched_domain].nr_samples)
		return NONE;

	latency = cb->stat[sched_domain].mean;
	if (latency >= 2 * target)
		return AWFUL;
	else if (latency > target)
		return BAD;
	else if (latency <= target / 2)
		return GREAT;
	else /* (latency <= target) */
		return GOOD;
}

/*
 * Adjust the read or synchronous write depth given the status of reads and
 * writes. The goal is that the latencies of the two domains are fair (i.e., if
 * one is good, then the other is good).
 */
static void kyber_adjust_rw_depth(struct kyber_queue_data *kqd,
				  unsigned int sched_domain, int this_status,
				  int other_status)
{
	unsigned int orig_depth, depth;

	/*
	 * If this domain had no samples, or reads and writes are both good or
	 * both bad, don't adjust the depth.
	 */
	if (this_status == NONE ||
	    (IS_GOOD(this_status) && IS_GOOD(other_status)) ||
	    (IS_BAD(this_status) && IS_BAD(other_status)))
		return;

	orig_depth = depth = kqd->domain_tokens[sched_domain].sb.depth;

	if (other_status == NONE) {
		depth++;
	} else {
		switch (this_status) {
		case GOOD:
			if (other_status == AWFUL)
				depth -= max(depth / 4, 1U);
			else
				depth -= max(depth / 8, 1U);
			break;
		case GREAT:
			if (other_status == AWFUL)
				depth /= 2;
			else
				depth -= max(depth / 4, 1U);
			break;
		case BAD:
			depth++;
			break;
		case AWFUL:
			if (other_status == GREAT)
				depth += 2;
			else
				depth++;
			break;
		}
	}

	depth = clamp(depth, 1U, kyber_depth[sched_domain]);
	if (depth != orig_depth)
		sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
}

/*
 * Adjust the depth of other requests given the status of reads and synchronous
 * writes. As long as either domain is doing fine, we don't throttle, but if
 * both domains are doing badly, we throttle heavily.
 */
static void kyber_adjust_other_depth(struct kyber_queue_data *kqd,
				     int read_status, int write_status,
				     bool have_samples)
{
	unsigned int orig_depth, depth;
	int status;

	orig_depth = depth = kqd->domain_tokens[KYBER_OTHER].sb.depth;

	if (read_status == NONE && write_status == NONE) {
		depth += 2;
	} else if (have_samples) {
		if (read_status == NONE)
			status = write_status;
		else if (write_status == NONE)
			status = read_status;
		else
			status = max(read_status, write_status);
		switch (status) {
		case GREAT:
			depth += 2;
			break;
		case GOOD:
			depth++;
			break;
		case BAD:
			depth -= max(depth / 4, 1U);
			break;
		case AWFUL:
			depth /= 2;
			break;
		}
	}

	depth = clamp(depth, 1U, kyber_depth[KYBER_OTHER]);
	if (depth != orig_depth)
		sbitmap_queue_resize(&kqd->domain_tokens[KYBER_OTHER], depth);
}

/*
 * Apply heuristics for limiting queue depths based on gathered latency
 * statistics.
 */
static void kyber_stat_timer_fn(struct blk_stat_callback *cb)
{
	struct kyber_queue_data *kqd = cb->data;
	int read_status, write_status;

	read_status = kyber_lat_status(cb, KYBER_READ, kqd->read_lat_nsec);
	write_status = kyber_lat_status(cb, KYBER_SYNC_WRITE, kqd->write_lat_nsec);

	kyber_adjust_rw_depth(kqd, KYBER_READ, read_status, write_status);
	kyber_adjust_rw_depth(kqd, KYBER_SYNC_WRITE, write_status, read_status);
	kyber_adjust_other_depth(kqd, read_status, write_status,
				 cb->stat[KYBER_OTHER].nr_samples != 0);

	/*
	 * Continue monitoring latencies if we aren't hitting the targets or
	 * we're still throttling other requests.
	 */
	if (!blk_stat_is_active(kqd->cb) &&
	    ((IS_BAD(read_status) || IS_BAD(write_status) ||
	      kqd->domain_tokens[KYBER_OTHER].sb.depth < kyber_depth[KYBER_OTHER])))
		blk_stat_activate_msecs(kqd->cb, 100);
}

static unsigned int kyber_sched_tags_shift(struct kyber_queue_data *kqd)
{
	/*
	 * All of the hardware queues have the same depth, so we can just grab
	 * the shift of the first one.
	 */
	return kqd->q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
}

static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
	struct kyber_queue_data *kqd;
	unsigned int max_tokens;
	unsigned int shift;
	int ret = -ENOMEM;
	int i;

	kqd = kmalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
	if (!kqd)
		goto err;
	kqd->q = q;

	kqd->cb = blk_stat_alloc_callback(kyber_stat_timer_fn, rq_sched_domain,
					  KYBER_NUM_DOMAINS, kqd);
	if (!kqd->cb)
		goto err_kqd;

	/*
	 * The maximum number of tokens for any scheduling domain is at least
	 * the queue depth of a single hardware queue. If the hardware doesn't
	 * have many tags, still provide a reasonable number.
	 */
	max_tokens = max_t(unsigned int, q->tag_set->queue_depth,
			   KYBER_MIN_DEPTH);
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      max_tokens, -1, false, GFP_KERNEL,
					      q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_cb;
		}
		sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]);
	}

	shift = kyber_sched_tags_shift(kqd);
328 | ||
329 | kqd->read_lat_nsec = 2000000ULL; | |
330 | kqd->write_lat_nsec = 10000000ULL; | |
331 | ||
332 | return kqd; | |
333 | ||
334 | err_cb: | |
335 | blk_stat_free_callback(kqd->cb); | |
336 | err_kqd: | |
337 | kfree(kqd); | |
338 | err: | |
339 | return ERR_PTR(ret); | |
340 | } | |
341 | ||
static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct kyber_queue_data *kqd;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	kqd = kyber_queue_data_alloc(q);
	if (IS_ERR(kqd)) {
		kobject_put(&eq->kobj);
		return PTR_ERR(kqd);
	}

	eq->elevator_data = kqd;
	q->elevator = eq;

	blk_stat_add_callback(q, kqd->cb);

	return 0;
}

static void kyber_exit_sched(struct elevator_queue *e)
{
	struct kyber_queue_data *kqd = e->elevator_data;
	struct request_queue *q = kqd->q;
	int i;

	blk_stat_remove_callback(q, kqd->cb);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
		sbitmap_queue_free(&kqd->domain_tokens[i]);
	blk_stat_free_callback(kqd->cb);
	kfree(kqd);
}

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd;
	int i;

	khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
	if (!khd)
		return -ENOMEM;

	spin_lock_init(&khd->lock);

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		init_waitqueue_func_entry(&khd->domain_wait[i],
					  kyber_domain_wake);
		khd->domain_wait[i].private = hctx;
		INIT_LIST_HEAD(&khd->domain_wait[i].entry);
		atomic_set(&khd->wait_index[i], 0);
	}

	khd->cur_domain = 0;
	khd->batching = 0;

	hctx->sched_data = khd;
	sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
					kqd->async_depth);

	return 0;
}

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	kfree(hctx->sched_data);
}

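/*
 * The domain token owned by a request is stashed in its elevator private
 * data: rq->elv.priv[0] holds the token number cast to a pointer, and -1
 * means no token is held.
 */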
static int rq_get_domain_token(struct request *rq)
{
	return (long)rq->elv.priv[0];
}

static void rq_set_domain_token(struct request *rq, int token)
{
	rq->elv.priv[0] = (void *)(long)token;
}

static void rq_clear_domain_token(struct kyber_queue_data *kqd,
				  struct request *rq)
{
	unsigned int sched_domain;
	int nr;

	nr = rq_get_domain_token(rq);
	if (nr != -1) {
		sched_domain = rq_sched_domain(rq);
		sbitmap_queue_clear(&kqd->domain_tokens[sched_domain], nr,
				    rq->mq_ctx->cpu);
	}
}

static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	/*
	 * We use the scheduler tags as per-hardware queue queueing tokens.
	 * Async requests can be limited at this stage.
	 */
	if (!op_is_sync(op)) {
		struct kyber_queue_data *kqd = data->q->elevator->elevator_data;

		data->shallow_depth = kqd->async_depth;
	}
}

static void kyber_prepare_request(struct request *rq, struct bio *bio)
{
	rq_set_domain_token(rq, -1);
}

static void kyber_finish_request(struct request *rq)
{
	struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;

	rq_clear_domain_token(kqd, rq);
}

static void kyber_completed_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;
	unsigned int sched_domain;
	u64 now, latency, target;

	/*
	 * Check if this request met our latency goal. If not, quickly gather
	 * some statistics and start throttling.
	 */
	sched_domain = rq_sched_domain(rq);
	switch (sched_domain) {
	case KYBER_READ:
		target = kqd->read_lat_nsec;
		break;
	case KYBER_SYNC_WRITE:
		target = kqd->write_lat_nsec;
		break;
	default:
		return;
	}

	/* If we are already monitoring latencies, don't check again. */
	if (blk_stat_is_active(kqd->cb))
		return;

	now = ktime_get_ns();
	if (now < rq->io_start_time_ns)
		return;

	latency = now - rq->io_start_time_ns;

	if (latency > target)
		blk_stat_activate_msecs(kqd->cb, 10);
}

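/*
 * Move any requests that blk-mq has queued in this hardware queue's per-CPU
 * software queues onto the per-domain lists so they can be dispatched by
 * scheduling domain.
 */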
static void kyber_flush_busy_ctxs(struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	LIST_HEAD(rq_list);
	struct request *rq, *next;

	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		unsigned int sched_domain;

		sched_domain = rq_sched_domain(rq);
		list_move_tail(&rq->queuelist, &khd->rqs[sched_domain]);
	}
}

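/*
 * Wait queue callback: a domain token was freed, so remove this entry from
 * the wait queue and kick the hardware queue to retry dispatch.
 */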
static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
			     void *key)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);

	list_del_init(&wait->entry);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}

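/*
 * Try to grab a token for the current scheduling domain. If none are
 * available, arm a wait queue entry so the hardware queue is re-run once a
 * token is freed, and retry once to close the race with the waker.
 */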
static int kyber_get_domain_token(struct kyber_queue_data *kqd,
				  struct kyber_hctx_data *khd,
				  struct blk_mq_hw_ctx *hctx)
{
	unsigned int sched_domain = khd->cur_domain;
	struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
	wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
	struct sbq_wait_state *ws;
	int nr;

	nr = __sbitmap_queue_get(domain_tokens);

	/*
	 * If we failed to get a domain token, make sure the hardware queue is
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (nr < 0 && list_empty_careful(&wait->entry)) {
		ws = sbq_wait_ptr(domain_tokens,
				  &khd->wait_index[sched_domain]);
		khd->domain_ws[sched_domain] = ws;
		add_wait_queue(&ws->wait, wait);

		/*
		 * Try again in case a token was freed before we got on the
		 * wait queue.
		 */
		nr = __sbitmap_queue_get(domain_tokens);
	}

	/*
	 * If we got a token while we were on the wait queue, remove ourselves
	 * from the wait queue to ensure that all wake ups make forward
	 * progress. It's possible that the waker already deleted the entry
	 * between the !list_empty_careful() check and us grabbing the lock,
	 * but list_del_init() is okay with that.
	 */
	if (nr >= 0 && !list_empty_careful(&wait->entry)) {
		ws = khd->domain_ws[sched_domain];
		spin_lock_irq(&ws->wait.lock);
		list_del_init(&wait->entry);
		spin_unlock_irq(&ws->wait.lock);
	}

	return nr;
}

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
			  struct kyber_hctx_data *khd,
			  struct blk_mq_hw_ctx *hctx,
			  bool *flushed)
{
	struct list_head *rqs;
	struct request *rq;
	int nr;

	rqs = &khd->rqs[khd->cur_domain];
	rq = list_first_entry_or_null(rqs, struct request, queuelist);

	/*
	 * If there wasn't already a pending request and we haven't flushed the
	 * software queues yet, flush the software queues and check again.
	 */
	if (!rq && !*flushed) {
		kyber_flush_busy_ctxs(khd, hctx);
		*flushed = true;
		rq = list_first_entry_or_null(rqs, struct request, queuelist);
	}

	if (rq) {
		nr = kyber_get_domain_token(kqd, khd, hctx);
		if (nr >= 0) {
			khd->batching++;
			rq_set_domain_token(rq, nr);
			list_del_init(&rq->queuelist);
			return rq;
		}
	}

	/* There were either no pending requests or no tokens. */
	return NULL;
}

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
	struct kyber_hctx_data *khd = hctx->sched_data;
	bool flushed = false;
	struct request *rq;
	int i;

	spin_lock(&khd->lock);

	/*
	 * First, if we are still entitled to batch, try to dispatch a request
	 * from the batch.
	 */
	if (khd->batching < kyber_batch_size[khd->cur_domain]) {
		rq = kyber_dispatch_cur_domain(kqd, khd, hctx, &flushed);
		if (rq)
			goto out;
	}

	/*
	 * Either,
	 * 1. We were no longer entitled to a batch.
	 * 2. The domain we were batching didn't have any requests.
	 * 3. The domain we were batching was out of tokens.
	 *
	 * Start another batch. Note that this wraps back around to the original
	 * domain if no other domains have requests or tokens.
	 */
	khd->batching = 0;
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
			khd->cur_domain = 0;
		else
			khd->cur_domain++;

		rq = kyber_dispatch_cur_domain(kqd, khd, hctx, &flushed);
		if (rq)
			goto out;
	}

	rq = NULL;
out:
	spin_unlock(&khd->lock);
	return rq;
}

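/*
 * There is work pending if any domain list has requests or if blk-mq still
 * has requests sitting in the software queues.
 */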
static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct kyber_hctx_data *khd = hctx->sched_data;
	int i;

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		if (!list_empty_careful(&khd->rqs[i]))
			return true;
	}
	return sbitmap_any_bit_set(&hctx->ctx_map);
}

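/*
 * The read and synchronous write latency targets are tunable at runtime as
 * read_lat_nsec and write_lat_nsec (in nanoseconds) via the elevator's sysfs
 * attributes defined below (queue/iosched/ under the block device).
 */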
#define KYBER_LAT_SHOW_STORE(op)					\
static ssize_t kyber_##op##_lat_show(struct elevator_queue *e,		\
				     char *page)			\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
									\
	return sprintf(page, "%llu\n", kqd->op##_lat_nsec);		\
}									\
									\
static ssize_t kyber_##op##_lat_store(struct elevator_queue *e,		\
				      const char *page, size_t count)	\
{									\
	struct kyber_queue_data *kqd = e->elevator_data;		\
	unsigned long long nsec;					\
	int ret;							\
									\
	ret = kstrtoull(page, 10, &nsec);				\
	if (ret)							\
		return ret;						\
									\
	kqd->op##_lat_nsec = nsec;					\
									\
	return count;							\
}
KYBER_LAT_SHOW_STORE(read);
KYBER_LAT_SHOW_STORE(write);
#undef KYBER_LAT_SHOW_STORE

#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
static struct elv_fs_entry kyber_sched_attrs[] = {
	KYBER_LAT_ATTR(read),
	KYBER_LAT_ATTR(write),
	__ATTR_NULL
};
#undef KYBER_LAT_ATTR

#ifdef CONFIG_BLK_DEBUG_FS
#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)			\
static int kyber_##name##_tokens_show(void *data, struct seq_file *m)	\
{									\
	struct request_queue *q = data;					\
	struct kyber_queue_data *kqd = q->elevator->elevator_data;	\
									\
	sbitmap_queue_show(&kqd->domain_tokens[domain], m);		\
	return 0;							\
}									\
									\
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos)	\
	__acquires(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_lock(&khd->lock);						\
	return seq_list_start(&khd->rqs[domain], *pos);			\
}									\
									\
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,	\
				     loff_t *pos)			\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	return seq_list_next(v, &khd->rqs[domain], pos);		\
}									\
									\
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)	\
	__releases(&khd->lock)						\
{									\
	struct blk_mq_hw_ctx *hctx = m->private;			\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
									\
	spin_unlock(&khd->lock);					\
}									\
									\
static const struct seq_operations kyber_##name##_rqs_seq_ops = {	\
	.start = kyber_##name##_rqs_start,				\
	.next = kyber_##name##_rqs_next,				\
	.stop = kyber_##name##_rqs_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
};									\
									\
static int kyber_##name##_waiting_show(void *data, struct seq_file *m)	\
{									\
	struct blk_mq_hw_ctx *hctx = data;				\
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain];		\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS

static int kyber_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct kyber_queue_data *kqd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", kqd->async_depth);
	return 0;
}

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	switch (khd->cur_domain) {
	case KYBER_READ:
		seq_puts(m, "READ\n");
		break;
	case KYBER_SYNC_WRITE:
		seq_puts(m, "SYNC_WRITE\n");
		break;
	case KYBER_OTHER:
		seq_puts(m, "OTHER\n");
		break;
	default:
		seq_printf(m, "%u\n", khd->cur_domain);
		break;
	}
	return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct kyber_hctx_data *khd = hctx->sched_data;

	seq_printf(m, "%u\n", khd->batching);
	return 0;
}

#define KYBER_QUEUE_DOMAIN_ATTRS(name)	\
	{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
	KYBER_QUEUE_DOMAIN_ATTRS(read),
	KYBER_QUEUE_DOMAIN_ATTRS(sync_write),
	KYBER_QUEUE_DOMAIN_ATTRS(other),
	{"async_depth", 0400, kyber_async_depth_show},
	{},
};
#undef KYBER_QUEUE_DOMAIN_ATTRS

#define KYBER_HCTX_DOMAIN_ATTRS(name)					\
	{#name "_rqs", 0400, .seq_ops = &kyber_##name##_rqs_seq_ops},	\
	{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
	KYBER_HCTX_DOMAIN_ATTRS(read),
	KYBER_HCTX_DOMAIN_ATTRS(sync_write),
	KYBER_HCTX_DOMAIN_ATTRS(other),
	{"cur_domain", 0400, kyber_cur_domain_show},
	{"batching", 0400, kyber_batching_show},
	{},
};
#undef KYBER_HCTX_DOMAIN_ATTRS
#endif

static struct elevator_type kyber_sched = {
	.ops.mq = {
		.init_sched = kyber_init_sched,
		.exit_sched = kyber_exit_sched,
		.init_hctx = kyber_init_hctx,
		.exit_hctx = kyber_exit_hctx,
		.limit_depth = kyber_limit_depth,
		.prepare_request = kyber_prepare_request,
		.finish_request = kyber_finish_request,
		.requeue_request = kyber_finish_request,
		.completed_request = kyber_completed_request,
		.dispatch_request = kyber_dispatch_request,
		.has_work = kyber_has_work,
	},
	.uses_mq = true,
#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
	.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
#endif
	.elevator_attrs = kyber_sched_attrs,
	.elevator_name = "kyber",
	.elevator_owner = THIS_MODULE,
};

static int __init kyber_init(void)
{
	return elv_register(&kyber_sched);
}

static void __exit kyber_exit(void)
{
	elv_unregister(&kyber_sched);
}

module_init(kyber_init);
module_exit(kyber_exit);

MODULE_AUTHOR("Omar Sandoval");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kyber I/O scheduler");