blk-mq: move hctx->dispatch and ctx->rq_list from sysfs to debugfs
block/blk-mq-sysfs.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

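/*
 * Empty release: the ctx and hctx kobjects are embedded in structures
 * whose lifetime is managed by the core blk-mq code, so there is
 * nothing to free here.
 */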
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

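/*
 * Attribute wrappers pairing a sysfs attribute with typed show/store
 * handlers for a software queue (blk_mq_ctx) or a hardware queue
 * (blk_mq_hw_ctx).
 */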
struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

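/*
 * The four dispatchers below recover the entry and the ctx/hctx from the
 * generic kobject/attribute pair, then call the typed handler under
 * q->sysfs_lock, returning -ENOENT once the queue is dying.
 */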
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

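/*
 * The paired per-ctx counters below are indexed by rw_is_sync(), so the
 * output shows the sync count first, then the async count.
 */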
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                                ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                                ctx->rq_completed[0]);
}

static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
                       hctx->poll_considered, hctx->poll_invoked,
                       hctx->poll_success);
}

static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
                                          const char *page, size_t size)
{
        hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;

        return size;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}

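/*
 * Dispatch batch-size histogram: bucket 0 counts empty dispatches,
 * bucket i counts batches of up to 2^(i-1) requests, and the final
 * bucket is printed with a '+' as a catch-all for anything larger.
 */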
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
                unsigned int d = 1U << (i - 1);

                page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
        }

        page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
                                                hctx->dispatched[i]);
        return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_sched_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        if (hctx->sched_tags)
                return blk_mq_tag_sysfs_show(hctx->sched_tags, page);

        return 0;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

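/* Print the CPUs this hardware queue serves as a comma-separated list. */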
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        ret += sprintf(ret + page, "\n");
        return ret;
}

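/*
 * Reset the per-ctx latency statistics for every software queue mapped
 * to this hardware queue; any write to the "stats" attribute triggers
 * this via blk_mq_hw_sysfs_stat_store() below.
 */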
static void blk_mq_stat_clear(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        unsigned int i;

        hctx_for_each_ctx(hctx, ctx, i) {
                blk_stat_init(&ctx->stat[BLK_STAT_READ]);
                blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
        }
}

static ssize_t blk_mq_hw_sysfs_stat_store(struct blk_mq_hw_ctx *hctx,
                                          const char *page, size_t count)
{
        blk_mq_stat_clear(hctx);
        return count;
}

static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
{
        return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
                        pre, (long long) stat->nr_samples,
                        (long long) stat->mean, (long long) stat->min,
                        (long long) stat->max);
}

static ssize_t blk_mq_hw_sysfs_stat_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        struct blk_rq_stat stat[2];
        ssize_t ret;

        blk_stat_init(&stat[BLK_STAT_READ]);
        blk_stat_init(&stat[BLK_STAT_WRITE]);

        blk_hctx_stat_get(hctx, stat);

        ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
        ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
        return ret;
}

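/* Attribute definitions for the per-ctx and per-hctx sysfs directories. */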
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};

static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
        .attr = {.name = "active", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_sched_tags = {
        .attr = {.name = "sched_tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_sched_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
        .attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
        .show = blk_mq_hw_sysfs_poll_show,
        .store = blk_mq_hw_sysfs_poll_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_stat = {
        .attr = {.name = "stats", .mode = S_IRUGO | S_IWUSR },
        .show = blk_mq_hw_sysfs_stat_show,
        .store = blk_mq_hw_sysfs_stat_store,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_sched_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        &blk_mq_hw_sysfs_active.attr,
        &blk_mq_hw_sysfs_poll.attr,
        &blk_mq_hw_sysfs_stat.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show   = blk_mq_sysfs_show,
        .store  = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show   = blk_mq_hw_sysfs_show,
        .store  = blk_mq_hw_sysfs_store,
};

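/*
 * kobj_types for the top-level "mq" directory, the per-ctx directories
 * and the per-hctx directories, tying the sysfs_ops and default
 * attributes together.
 */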
static struct kobj_type blk_mq_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops      = &blk_mq_sysfs_ops,
        .default_attrs  = default_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops      = &blk_mq_hw_sysfs_ops,
        .default_attrs  = default_hw_ctx_attrs,
        .release        = blk_mq_sysfs_release,
};

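/*
 * Add/remove the kobjects for one hardware queue and the software
 * queues mapped to it. A hctx with no mapped ctxs is skipped entirely.
 */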
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx)
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx)
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

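/*
 * Tear down the whole mq sysfs/debugfs hierarchy for a queue: the hctx
 * and ctx kobjects, the debugfs entries, and finally the "mq" directory
 * itself, dropping the device reference taken at registration time.
 */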
static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        kobject_put(&ctx->kobj);

                kobject_put(&hctx->kobj);
        }

        blk_mq_debugfs_unregister(q);

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&dev->kobj);

        q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
        blk_mq_disable_hotplug();
        __blk_mq_unregister_dev(dev, q);
        blk_mq_enable_hotplug();
}

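/* Initialize the embedded kobjects before they are added to sysfs. */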
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
        kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

static void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_ctx *ctx;
        int cpu;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        for_each_possible_cpu(cpu) {
                ctx = per_cpu_ptr(q->queue_ctx, cpu);
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
        }
}

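/*
 * Register the "mq" directory under the device, the debugfs root for
 * the queue, and one subdirectory per hardware queue. On partial
 * failure everything registered so far is torn down again.
 */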
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_disable_hotplug();

        blk_mq_sysfs_init(q);

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                goto out;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        blk_mq_debugfs_register(q, kobject_name(&dev->kobj));

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret)
                __blk_mq_unregister_dev(dev, q);
        else
                q->mq_sysfs_init_done = true;
out:
        blk_mq_enable_hotplug();

        return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

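/*
 * blk_mq_sysfs_unregister() and blk_mq_sysfs_register() drop and re-add
 * the per-hctx sysfs and debugfs entries when the hardware queue layout
 * changes; the top-level "mq" kobject stays registered throughout.
 */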
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        if (!q->mq_sysfs_init_done)
                return;

        blk_mq_debugfs_unregister_hctxs(q);

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        if (!q->mq_sysfs_init_done)
                return ret;

        blk_mq_debugfs_register_hctxs(q);

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}