blk-mq: introduce blk_mq_delay_kick_requeue_list()
linux-2.6-block.git: block/blk-mq-sysfs.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

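/*
 * Generic show/store dispatchers for the per-ctx and per-hctx attributes
 * defined below.  They map the kobject back to its blk_mq_ctx or
 * blk_mq_hw_ctx, take q->sysfs_lock, and return -ENOENT once the queue
 * has been marked dying.
 */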
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
		       ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
	return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
		       ctx->rq_completed[0]);
}

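/*
 * Dump the requests on @list, one pointer per line, prefixed by @msg.
 * Output is capped at PAGE_SIZE - 1; when the list does not fit, the
 * listing is terminated with "\t...\n".
 */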
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
	struct request *rq;
	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);

	list_for_each_entry(rq, list, queuelist) {
		const int rq_len = 2 * sizeof(rq) + 2;

		/* if the output will be truncated */
		if (PAGE_SIZE - 1 < len + rq_len) {
			/* backspacing if it can't hold '\t...\n' */
			if (PAGE_SIZE - 1 < len + 5)
				len -= rq_len;
			len += snprintf(page + len, PAGE_SIZE - 1 - len,
					"\t...\n");
			break;
		}
		len += snprintf(page + len, PAGE_SIZE - 1 - len,
				"\t%p\n", rq);
	}

	return len;
}

static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
	ssize_t ret;

	spin_lock(&ctx->lock);
	ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
	spin_unlock(&ctx->lock);

	return ret;
}

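/*
 * Poll statistics: how often polling was considered, invoked and successful
 * for this hardware queue.  Writing anything to the "io_poll" attribute
 * resets all three counters.
 */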
static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
		       hctx->poll_considered, hctx->poll_invoked,
		       hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
					  const char *page, size_t size)
{
	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;

	return size;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
					   char *page)
{
	return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%lu\n", hctx->run);
}

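/*
 * Per-hctx dispatch histogram: hctx->dispatched[i] is printed against a
 * power-of-two bucket label (0, 1, 2, 4, ... up to
 * 1 << (BLK_MQ_MAX_DISPATCH_ORDER - 2)).
 */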
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
					       char *page)
{
	char *start_page = page;
	int i;

	page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
		unsigned long d = 1U << (i - 1);

		page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
	}

	return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	ssize_t ret;

	spin_lock(&hctx->lock);
	ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
	spin_unlock(&hctx->lock);

	return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	ret += sprintf(ret + page, "\n");
	return ret;
}

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
	.attr = {.name = "active", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
	.attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
	.show = blk_mq_hw_sysfs_poll_show,
	.store = blk_mq_hw_sysfs_poll_store,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	&blk_mq_hw_sysfs_active.attr,
	&blk_mq_hw_sysfs_poll.attr,
	NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show = blk_mq_sysfs_show,
	.store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.default_attrs = default_ctx_attrs,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_attrs = default_hw_ctx_attrs,
	.release = blk_mq_sysfs_release,
};

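/*
 * Register/unregister the kobjects for one hardware queue and its software
 * queues.  The resulting hierarchy is q->mq_kobj -> hctx (named after
 * hctx->queue_num) -> ctx (named "cpu<N>"), visible under
 * /sys/block/<disk>/mq/.
 */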
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

static void __blk_mq_unregister_disk(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	int i, j;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_unregister_hctx(hctx);

		hctx_for_each_ctx(hctx, ctx, j)
			kobject_put(&ctx->kobj);

		kobject_put(&hctx->kobj);
	}

	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
	kobject_del(&q->mq_kobj);
	kobject_put(&q->mq_kobj);

	kobject_put(&disk_to_dev(disk)->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_disk(struct gendisk *disk)
{
	blk_mq_disable_hotplug();
	__blk_mq_unregister_disk(disk);
	blk_mq_enable_hotplug();
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

static void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(&q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

int blk_mq_register_disk(struct gendisk *disk)
{
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	blk_mq_disable_hotplug();

	blk_mq_sysfs_init(q);

	ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	if (ret)
		__blk_mq_unregister_disk(disk);
	else
		q->mq_sysfs_init_done = true;
out:
	blk_mq_enable_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);

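/*
 * Re-register or unregister the per-hctx (and per-ctx) sysfs entries without
 * touching the "mq" directory itself, e.g. while the hardware queues are
 * being remapped.  Both helpers are no-ops until blk_mq_register_disk() has
 * set q->mq_sysfs_init_done.
 */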
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!q->mq_sysfs_init_done)
		return;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	if (!q->mq_sysfs_init_done)
		return ret;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

	return ret;
}