/*
 * block/blk-mq-sysfs.c — sysfs support for blk-mq hardware/software queues.
 * (Source: linux-2.6-block.git; patch context: "blk-mq: Make
 * blk_mq_run_hw_queue() return void".)
 */
3dcf60bc 1// SPDX-License-Identifier: GPL-2.0
320ae51f
JA
2#include <linux/kernel.h>
3#include <linux/module.h>
4#include <linux/backing-dev.h>
5#include <linux/bio.h>
6#include <linux/blkdev.h>
7#include <linux/mm.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/workqueue.h>
11#include <linux/smp.h>
12
13#include <linux/blk-mq.h>
c7e2d94b 14#include "blk.h"
320ae51f
JA
15#include "blk-mq.h"
16#include "blk-mq-tag.h"
17
18static void blk_mq_sysfs_release(struct kobject *kobj)
19{
1db4909e
ML
20 struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);
21
22 free_percpu(ctxs->queue_ctx);
23 kfree(ctxs);
24}
25
26static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
27{
28 struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);
29
30 /* ctx->ctxs won't be released until all ctx are freed */
31 kobject_put(&ctx->ctxs->kobj);
320ae51f
JA
32}
33
6c8b232e
ML
34static void blk_mq_hw_sysfs_release(struct kobject *kobj)
35{
36 struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
37 kobj);
c7e2d94b 38
1b97871b
ML
39 cancel_delayed_work_sync(&hctx->run_work);
40
c7e2d94b
ML
41 if (hctx->flags & BLK_MQ_F_BLOCKING)
42 cleanup_srcu_struct(hctx->srcu);
43 blk_free_flush_queue(hctx->fq);
44 sbitmap_free(&hctx->ctx_map);
01388df3 45 free_cpumask_var(hctx->cpumask);
6c8b232e
ML
46 kfree(hctx->ctxs);
47 kfree(hctx);
48}
49
320ae51f
JA
50struct blk_mq_ctx_sysfs_entry {
51 struct attribute attr;
52 ssize_t (*show)(struct blk_mq_ctx *, char *);
53 ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
54};
55
56struct blk_mq_hw_ctx_sysfs_entry {
57 struct attribute attr;
58 ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
59 ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
60};
61
62static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
63 char *page)
64{
65 struct blk_mq_ctx_sysfs_entry *entry;
66 struct blk_mq_ctx *ctx;
67 struct request_queue *q;
68 ssize_t res;
69
70 entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
71 ctx = container_of(kobj, struct blk_mq_ctx, kobj);
72 q = ctx->queue;
73
74 if (!entry->show)
75 return -EIO;
76
320ae51f 77 mutex_lock(&q->sysfs_lock);
bae85c15 78 res = entry->show(ctx, page);
320ae51f
JA
79 mutex_unlock(&q->sysfs_lock);
80 return res;
81}
82
83static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
84 const char *page, size_t length)
85{
86 struct blk_mq_ctx_sysfs_entry *entry;
87 struct blk_mq_ctx *ctx;
88 struct request_queue *q;
89 ssize_t res;
90
91 entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
92 ctx = container_of(kobj, struct blk_mq_ctx, kobj);
93 q = ctx->queue;
94
95 if (!entry->store)
96 return -EIO;
97
320ae51f 98 mutex_lock(&q->sysfs_lock);
bae85c15 99 res = entry->store(ctx, page, length);
320ae51f
JA
100 mutex_unlock(&q->sysfs_lock);
101 return res;
102}
103
104static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
105 struct attribute *attr, char *page)
106{
107 struct blk_mq_hw_ctx_sysfs_entry *entry;
108 struct blk_mq_hw_ctx *hctx;
109 struct request_queue *q;
110 ssize_t res;
111
112 entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
113 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
114 q = hctx->queue;
115
116 if (!entry->show)
117 return -EIO;
118
320ae51f 119 mutex_lock(&q->sysfs_lock);
bae85c15 120 res = entry->show(hctx, page);
320ae51f
JA
121 mutex_unlock(&q->sysfs_lock);
122 return res;
123}
124
125static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
126 struct attribute *attr, const char *page,
127 size_t length)
128{
129 struct blk_mq_hw_ctx_sysfs_entry *entry;
130 struct blk_mq_hw_ctx *hctx;
131 struct request_queue *q;
132 ssize_t res;
133
134 entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
135 hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
136 q = hctx->queue;
137
138 if (!entry->store)
139 return -EIO;
140
320ae51f 141 mutex_lock(&q->sysfs_lock);
bae85c15 142 res = entry->store(hctx, page, length);
320ae51f
JA
143 mutex_unlock(&q->sysfs_lock);
144 return res;
145}
146
d96b37c0
OS
147static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
148 char *page)
bd166ef1 149{
d96b37c0 150 return sprintf(page, "%u\n", hctx->tags->nr_tags);
bd166ef1
JA
151}
152
d96b37c0
OS
153static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
154 char *page)
320ae51f 155{
d96b37c0 156 return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
320ae51f
JA
157}
158
676141e4
JA
159static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
160{
cb2da43e 161 unsigned int i, first = 1;
676141e4
JA
162 ssize_t ret = 0;
163
cb2da43e 164 for_each_cpu(i, hctx->cpumask) {
676141e4
JA
165 if (first)
166 ret += sprintf(ret + page, "%u", i);
167 else
168 ret += sprintf(ret + page, ", %u", i);
169
170 first = 0;
171 }
172
676141e4
JA
173 ret += sprintf(ret + page, "\n");
174 return ret;
175}
176
d96b37c0 177static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
5657a819 178 .attr = {.name = "nr_tags", .mode = 0444 },
d96b37c0
OS
179 .show = blk_mq_hw_sysfs_nr_tags_show,
180};
181static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
5657a819 182 .attr = {.name = "nr_reserved_tags", .mode = 0444 },
d96b37c0
OS
183 .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
184};
676141e4 185static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
5657a819 186 .attr = {.name = "cpu_list", .mode = 0444 },
676141e4
JA
187 .show = blk_mq_hw_sysfs_cpus_show,
188};
320ae51f
JA
189
190static struct attribute *default_hw_ctx_attrs[] = {
d96b37c0
OS
191 &blk_mq_hw_sysfs_nr_tags.attr,
192 &blk_mq_hw_sysfs_nr_reserved_tags.attr,
676141e4 193 &blk_mq_hw_sysfs_cpus.attr,
320ae51f
JA
194 NULL,
195};
800f5aa1 196ATTRIBUTE_GROUPS(default_hw_ctx);
320ae51f
JA
197
198static const struct sysfs_ops blk_mq_sysfs_ops = {
199 .show = blk_mq_sysfs_show,
200 .store = blk_mq_sysfs_store,
201};
202
203static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
204 .show = blk_mq_hw_sysfs_show,
205 .store = blk_mq_hw_sysfs_store,
206};
207
208static struct kobj_type blk_mq_ktype = {
209 .sysfs_ops = &blk_mq_sysfs_ops,
210 .release = blk_mq_sysfs_release,
211};
212
213static struct kobj_type blk_mq_ctx_ktype = {
214 .sysfs_ops = &blk_mq_sysfs_ops,
1db4909e 215 .release = blk_mq_ctx_sysfs_release,
320ae51f
JA
216};
217
218static struct kobj_type blk_mq_hw_ktype = {
219 .sysfs_ops = &blk_mq_hw_sysfs_ops,
800f5aa1 220 .default_groups = default_hw_ctx_groups,
6c8b232e 221 .release = blk_mq_hw_sysfs_release,
320ae51f
JA
222};
223
ee3c5db0 224static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
67aec14c
JA
225{
226 struct blk_mq_ctx *ctx;
227 int i;
228
4593fdbe 229 if (!hctx->nr_ctx)
67aec14c
JA
230 return;
231
232 hctx_for_each_ctx(hctx, ctx, i)
233 kobject_del(&ctx->kobj);
234
235 kobject_del(&hctx->kobj);
236}
237
ee3c5db0 238static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
67aec14c
JA
239{
240 struct request_queue *q = hctx->queue;
241 struct blk_mq_ctx *ctx;
242 int i, ret;
243
4593fdbe 244 if (!hctx->nr_ctx)
67aec14c
JA
245 return 0;
246
1db4909e 247 ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
67aec14c
JA
248 if (ret)
249 return ret;
250
251 hctx_for_each_ctx(hctx, ctx, i) {
252 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
253 if (ret)
254 break;
255 }
256
257 return ret;
258}
259
667257e8 260void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
320ae51f 261{
85157366 262 struct blk_mq_hw_ctx *hctx;
7ea5fe31 263 int i;
85157366 264
cecf5d87 265 lockdep_assert_held(&q->sysfs_dir_lock);
2d0364c8 266
6c8b232e 267 queue_for_each_hw_ctx(q, hctx, i)
67aec14c
JA
268 blk_mq_unregister_hctx(hctx);
269
1db4909e
ML
270 kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
271 kobject_del(q->mq_kobj);
b21d5b30 272 kobject_put(&dev->kobj);
4593fdbe
AM
273
274 q->mq_sysfs_init_done = false;
c0f3fd2b
JA
275}
276
868f2f0b
KB
277void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
278{
279 kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
280}
281
7ea5fe31
ML
282void blk_mq_sysfs_deinit(struct request_queue *q)
283{
284 struct blk_mq_ctx *ctx;
285 int cpu;
286
287 for_each_possible_cpu(cpu) {
288 ctx = per_cpu_ptr(q->queue_ctx, cpu);
289 kobject_put(&ctx->kobj);
290 }
1db4909e 291 kobject_put(q->mq_kobj);
7ea5fe31
ML
292}
293
737f98cf 294void blk_mq_sysfs_init(struct request_queue *q)
67aec14c 295{
67aec14c 296 struct blk_mq_ctx *ctx;
897bb0c7 297 int cpu;
67aec14c 298
1db4909e 299 kobject_init(q->mq_kobj, &blk_mq_ktype);
67aec14c 300
897bb0c7
TG
301 for_each_possible_cpu(cpu) {
302 ctx = per_cpu_ptr(q->queue_ctx, cpu);
1db4909e
ML
303
304 kobject_get(q->mq_kobj);
06a41a99 305 kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
897bb0c7 306 }
67aec14c
JA
307}
308
2d0364c8 309int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
320ae51f 310{
320ae51f 311 struct blk_mq_hw_ctx *hctx;
67aec14c 312 int ret, i;
320ae51f 313
2d0364c8 314 WARN_ON_ONCE(!q->kobj.parent);
cecf5d87 315 lockdep_assert_held(&q->sysfs_dir_lock);
4593fdbe 316
1db4909e 317 ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
320ae51f 318 if (ret < 0)
4593fdbe 319 goto out;
320ae51f 320
1db4909e 321 kobject_uevent(q->mq_kobj, KOBJ_ADD);
320ae51f
JA
322
323 queue_for_each_hw_ctx(q, hctx, i) {
67aec14c 324 ret = blk_mq_register_hctx(hctx);
320ae51f 325 if (ret)
f05d1ba7 326 goto unreg;
320ae51f
JA
327 }
328
f05d1ba7 329 q->mq_sysfs_init_done = true;
2d0364c8 330
4593fdbe 331out:
2d0364c8 332 return ret;
f05d1ba7
BVA
333
334unreg:
335 while (--i >= 0)
336 blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
337
1db4909e
ML
338 kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
339 kobject_del(q->mq_kobj);
f05d1ba7
BVA
340 kobject_put(&dev->kobj);
341 return ret;
2d0364c8
BVA
342}
343
67aec14c
JA
344void blk_mq_sysfs_unregister(struct request_queue *q)
345{
346 struct blk_mq_hw_ctx *hctx;
347 int i;
348
cecf5d87 349 mutex_lock(&q->sysfs_dir_lock);
4593fdbe 350 if (!q->mq_sysfs_init_done)
2d0364c8 351 goto unlock;
4593fdbe 352
67aec14c
JA
353 queue_for_each_hw_ctx(q, hctx, i)
354 blk_mq_unregister_hctx(hctx);
2d0364c8
BVA
355
356unlock:
cecf5d87 357 mutex_unlock(&q->sysfs_dir_lock);
67aec14c
JA
358}
359
360int blk_mq_sysfs_register(struct request_queue *q)
361{
362 struct blk_mq_hw_ctx *hctx;
363 int i, ret = 0;
364
cecf5d87 365 mutex_lock(&q->sysfs_dir_lock);
4593fdbe 366 if (!q->mq_sysfs_init_done)
2d0364c8 367 goto unlock;
4593fdbe 368
67aec14c
JA
369 queue_for_each_hw_ctx(q, hctx, i) {
370 ret = blk_mq_register_hctx(hctx);
371 if (ret)
372 break;
373 }
374
2d0364c8 375unlock:
cecf5d87 376 mutex_unlock(&q->sysfs_dir_lock);
2d0364c8 377
67aec14c
JA
378 return ret;
379}