// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct gendisk *disk, char *page);
	ssize_t (*show_limit)(struct gendisk *disk, char *page);

	ssize_t (*store)(struct gendisk *disk, const char *page, size_t count);
	int (*store_limit)(struct gendisk *disk, const char *page,
			size_t count, struct queue_limits *lim);
};

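/*
 * Dispatch convention (see queue_attr_show() and queue_attr_store() below):
 * ->show and ->store do their own locking, ->show_limit is called under
 * q->limits_lock, and ->store_limit operates on a queue_limits copy
 * obtained via queue_limits_start_update() and committed with
 * queue_limits_commit_update_frozen().
 */
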
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sysfs_emit(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

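/*
 * Example: queue_var_store(&v, "128\n", count) parses the decimal string,
 * stores 128 in v and returns count; non-numeric input or values above
 * UINT_MAX are rejected with -EINVAL.
 */
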
static ssize_t queue_requests_show(struct gendisk *disk, char *page)
{
	ssize_t ret;

	mutex_lock(&disk->queue->elevator_lock);
	ret = queue_var_show(disk->queue->nr_requests, page);
	mutex_unlock(&disk->queue->elevator_lock);
	return ret;
}

static ssize_t
queue_requests_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;
	unsigned int memflags;
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	memflags = blk_mq_freeze_queue(q);
	mutex_lock(&q->elevator_lock);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(disk->queue, nr);
	if (err)
		ret = err;
	mutex_unlock(&q->elevator_lock);
	blk_mq_unfreeze_queue(q, memflags);
	return ret;
}

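/*
 * Typical usage from userspace (illustrative; the sysfs path depends on the
 * device name):
 *
 *	echo 64 > /sys/block/sda/queue/nr_requests
 *
 * Values below BLKDEV_MIN_RQ are clamped up, and only blk-mq queues accept
 * writes; bio-based queues fail with -EINVAL.
 */
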
static ssize_t queue_ra_show(struct gendisk *disk, char *page)
{
	ssize_t ret;

	mutex_lock(&disk->queue->limits_lock);
	ret = queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
	mutex_unlock(&disk->queue->limits_lock);

	return ret;
}

static ssize_t
queue_ra_store(struct gendisk *disk, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;
	unsigned int memflags;
	struct request_queue *q = disk->queue;

	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	/*
	 * ->ra_pages is protected by ->limits_lock because it is usually
	 * calculated from the queue limits by queue_limits_commit_update.
	 */
	mutex_lock(&q->limits_lock);
	memflags = blk_mq_freeze_queue(q);
	disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	mutex_unlock(&q->limits_lock);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}

#define QUEUE_SYSFS_LIMIT_SHOW(_field)					\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field, page);	\
}

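/*
 * For reference, QUEUE_SYSFS_LIMIT_SHOW(max_segments) below expands to:
 *
 *	static ssize_t queue_max_segments_show(struct gendisk *disk,
 *			char *page)
 *	{
 *		return queue_var_show(disk->queue->limits.max_segments, page);
 *	}
 */
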
QUEUE_SYSFS_LIMIT_SHOW(max_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_discard_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_integrity_segments)
QUEUE_SYSFS_LIMIT_SHOW(max_segment_size)
QUEUE_SYSFS_LIMIT_SHOW(max_write_streams)
QUEUE_SYSFS_LIMIT_SHOW(write_stream_granularity)
QUEUE_SYSFS_LIMIT_SHOW(logical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(physical_block_size)
QUEUE_SYSFS_LIMIT_SHOW(chunk_sectors)
QUEUE_SYSFS_LIMIT_SHOW(io_min)
QUEUE_SYSFS_LIMIT_SHOW(io_opt)
QUEUE_SYSFS_LIMIT_SHOW(discard_granularity)
QUEUE_SYSFS_LIMIT_SHOW(zone_write_granularity)
QUEUE_SYSFS_LIMIT_SHOW(virt_boundary_mask)
QUEUE_SYSFS_LIMIT_SHOW(dma_alignment)
QUEUE_SYSFS_LIMIT_SHOW(max_open_zones)
QUEUE_SYSFS_LIMIT_SHOW(max_active_zones)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_min)
QUEUE_SYSFS_LIMIT_SHOW(atomic_write_unit_max)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%llu\n",				\
		(unsigned long long)disk->queue->limits._field <<	\
			SECTOR_SHIFT);					\
}

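/*
 * These helpers report a limit that is kept in 512-byte sectors as bytes,
 * e.g. max_discard_sectors is emitted as limits.max_discard_sectors <<
 * SECTOR_SHIFT for the "discard_max_bytes" attribute.
 */
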
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_hw_discard_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_write_zeroes_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(atomic_write_boundary_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_BYTES(max_zone_append_sectors)

#define QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(_field)			\
static ssize_t queue_##_field##_show(struct gendisk *disk, char *page)	\
{									\
	return queue_var_show(disk->queue->limits._field >> 1, page);	\
}

QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_sectors)
QUEUE_SYSFS_LIMIT_SHOW_SECTORS_TO_KB(max_hw_sectors)

#define QUEUE_SYSFS_SHOW_CONST(_name, _val)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%d\n", _val);				\
}

/* deprecated fields */
QUEUE_SYSFS_SHOW_CONST(discard_zeroes_data, 0)
QUEUE_SYSFS_SHOW_CONST(write_same_max, 0)
QUEUE_SYSFS_SHOW_CONST(poll_delay, -1)

static int queue_max_discard_sectors_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long max_discard_bytes;
	ssize_t ret;

	ret = queue_var_store(&max_discard_bytes, page, count);
	if (ret < 0)
		return ret;

	if (max_discard_bytes & (disk->queue->limits.discard_granularity - 1))
		return -EINVAL;

	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
		return -EINVAL;

	lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
	return 0;
}

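/*
 * Example: writing "1048576" to discard_max_bytes stores 2048 (1 MiB in
 * sectors) in lim->max_user_discard_sectors. The byte value must be a
 * multiple of limits.discard_granularity and, once converted to sectors,
 * must fit in an unsigned int; anything else fails with -EINVAL.
 */
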
static int
queue_max_sectors_store(struct gendisk *disk, const char *page, size_t count,
		struct queue_limits *lim)
{
	unsigned long max_sectors_kb;
	ssize_t ret;

	ret = queue_var_store(&max_sectors_kb, page, count);
	if (ret < 0)
		return ret;

	lim->max_user_sectors = max_sectors_kb << 1;
	return 0;
}

static ssize_t queue_feature_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim, blk_features_t feature)
{
	unsigned long val;
	ssize_t ret;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val)
		lim->features |= feature;
	else
		lim->features &= ~feature;
	return 0;
}

#define QUEUE_SYSFS_FEATURE(_name, _feature)				\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}									\
static int queue_##_name##_store(struct gendisk *disk,			\
		const char *page, size_t count, struct queue_limits *lim) \
{									\
	return queue_feature_store(disk, page, count, lim, _feature);	\
}

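/*
 * For illustration, QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
 * expands to queue_rotational_show(), which emits whether the feature bit
 * is set in limits.features, and queue_rotational_store(), which forwards
 * to queue_feature_store() with BLK_FEAT_ROTATIONAL.
 */
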
QUEUE_SYSFS_FEATURE(rotational, BLK_FEAT_ROTATIONAL)
QUEUE_SYSFS_FEATURE(add_random, BLK_FEAT_ADD_RANDOM)
QUEUE_SYSFS_FEATURE(iostats, BLK_FEAT_IO_STAT)
QUEUE_SYSFS_FEATURE(stable_writes, BLK_FEAT_STABLE_WRITES);

#define QUEUE_SYSFS_FEATURE_SHOW(_name, _feature)			\
static ssize_t queue_##_name##_show(struct gendisk *disk, char *page)	\
{									\
	return sysfs_emit(page, "%u\n",					\
		!!(disk->queue->limits.features & _feature));		\
}

QUEUE_SYSFS_FEATURE_SHOW(fua, BLK_FEAT_FUA);
QUEUE_SYSFS_FEATURE_SHOW(dax, BLK_FEAT_DAX);

static ssize_t queue_poll_show(struct gendisk *disk, char *page)
{
	if (queue_is_mq(disk->queue))
		return sysfs_emit(page, "%u\n", blk_mq_can_poll(disk->queue));

	return sysfs_emit(page, "%u\n",
		!!(disk->queue->limits.features & BLK_FEAT_POLL));
}

static ssize_t queue_zoned_show(struct gendisk *disk, char *page)
{
	if (blk_queue_is_zoned(disk->queue))
		return sysfs_emit(page, "host-managed\n");
	return sysfs_emit(page, "none\n");
}

static ssize_t queue_nr_zones_show(struct gendisk *disk, char *page)
{
	return queue_var_show(disk_nr_zones(disk), page);
}

static ssize_t queue_iostats_passthrough_show(struct gendisk *disk, char *page)
{
	return queue_var_show(!!blk_queue_passthrough_stat(disk->queue), page);
}

static int queue_iostats_passthrough_store(struct gendisk *disk,
		const char *page, size_t count, struct queue_limits *lim)
{
	unsigned long ios;
	ssize_t ret;

	ret = queue_var_store(&ios, page, count);
	if (ret < 0)
		return ret;

	if (ios)
		lim->flags |= BLK_FLAG_IOSTATS_PASSTHROUGH;
	else
		lim->flags &= ~BLK_FLAG_IOSTATS_PASSTHROUGH;
	return 0;
}

static ssize_t queue_nomerges_show(struct gendisk *disk, char *page)
{
	return queue_var_show((blk_queue_nomerges(disk->queue) << 1) |
			       blk_queue_noxmerges(disk->queue), page);
}

static ssize_t queue_nomerges_store(struct gendisk *disk, const char *page,
				    size_t count)
{
	unsigned long nm;
	unsigned int memflags;
	struct request_queue *q = disk->queue;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	memflags = blk_mq_freeze_queue(q);
	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}

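/*
 * "nomerges" encoding, matching the decoding above: 0 enables all merging,
 * 1 sets QUEUE_FLAG_NOXMERGES (no extended merge lookups), and 2 sets
 * QUEUE_FLAG_NOMERGES (no merging at all). The show side reports the same
 * encoding.
 */
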
static ssize_t queue_rq_affinity_show(struct gendisk *disk, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &disk->queue->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &disk->queue->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct gendisk *disk, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	struct request_queue *q = disk->queue;
	unsigned long val;
	unsigned int memflags;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	/*
	 * Here we update two queue flags, each using atomic bitops. Although
	 * updating the two flags together isn't atomic, this should be
	 * harmless as those flags are accessed individually using atomic
	 * test_bit operations. So we don't grab any lock while updating
	 * these flags.
	 */
	memflags = blk_mq_freeze_queue(q);
	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	blk_mq_unfreeze_queue(q, memflags);
#endif
	return ret;
}

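/*
 * "rq_affinity" encoding, matching the branches above: 0 clears both flags,
 * 1 sets QUEUE_FLAG_SAME_COMP (complete within the submitting CPU's group),
 * and 2 also sets QUEUE_FLAG_SAME_FORCE (force completion on the exact
 * submitting CPU). On !CONFIG_SMP builds writes fail with -EINVAL.
 */
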
static ssize_t queue_poll_delay_store(struct gendisk *disk, const char *page,
				      size_t count)
{
	return count;
}

static ssize_t queue_poll_store(struct gendisk *disk, const char *page,
				size_t count)
{
	unsigned int memflags;
	ssize_t ret = count;
	struct request_queue *q = disk->queue;

	memflags = blk_mq_freeze_queue(q);
	if (!(q->limits.features & BLK_FEAT_POLL)) {
		ret = -EINVAL;
		goto out;
	}

	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
out:
	blk_mq_unfreeze_queue(q, memflags);
	return ret;
}

static ssize_t queue_io_timeout_show(struct gendisk *disk, char *page)
{
	return sysfs_emit(page, "%u\n",
		jiffies_to_msecs(READ_ONCE(disk->queue->rq_timeout)));
}

static ssize_t queue_io_timeout_store(struct gendisk *disk, const char *page,
				      size_t count)
{
	unsigned int val, memflags;
	int err;
	struct request_queue *q = disk->queue;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	memflags = blk_mq_freeze_queue(q);
	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
	blk_mq_unfreeze_queue(q, memflags);

	return count;
}

static ssize_t queue_wc_show(struct gendisk *disk, char *page)
{
	if (blk_queue_write_cache(disk->queue))
		return sysfs_emit(page, "write back\n");
	return sysfs_emit(page, "write through\n");
}

static int queue_wc_store(struct gendisk *disk, const char *page,
		size_t count, struct queue_limits *lim)
{
	bool disable;

	if (!strncmp(page, "write back", 10)) {
		disable = false;
	} else if (!strncmp(page, "write through", 13) ||
		   !strncmp(page, "none", 4)) {
		disable = true;
	} else {
		return -EINVAL;
	}

	if (disable)
		lim->flags |= BLK_FLAG_WRITE_CACHE_DISABLED;
	else
		lim->flags &= ~BLK_FLAG_WRITE_CACHE_DISABLED;
	return 0;
}

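/*
 * Accepted "write_cache" tokens, per the strncmp() checks above:
 * "write back" clears BLK_FLAG_WRITE_CACHE_DISABLED, while "write through"
 * and "none" set it; anything else fails with -EINVAL. Note that these are
 * prefix matches, so e.g. "write backer" is also accepted as "write back".
 */
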
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};

#define QUEUE_LIM_RO_ENTRY(_prefix, _name)		\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr		= { .name = _name, .mode = 0444 },	\
	.show_limit	= _prefix##_show,		\
}

#define QUEUE_LIM_RW_ENTRY(_prefix, _name)		\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr		= { .name = _name, .mode = 0644 },	\
	.show_limit	= _prefix##_show,		\
	.store_limit	= _prefix##_store,		\
}

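/*
 * For illustration, QUEUE_RW_ENTRY(queue_requests, "nr_requests") expands to:
 *
 *	static struct queue_sysfs_entry queue_requests_entry = {
 *		.attr	= { .name = "nr_requests", .mode = 0644 },
 *		.show	= queue_requests_show,
 *		.store	= queue_requests_store,
 *	};
 */
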
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_LIM_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_LIM_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_LIM_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_LIM_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_LIM_RO_ENTRY(queue_max_write_streams, "max_write_streams");
QUEUE_LIM_RO_ENTRY(queue_write_stream_granularity, "write_stream_granularity");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_LIM_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_LIM_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_LIM_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_LIM_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_LIM_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_LIM_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_LIM_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_LIM_RO_ENTRY(queue_max_hw_discard_sectors, "discard_max_hw_bytes");
QUEUE_LIM_RW_ENTRY(queue_max_discard_sectors, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_LIM_RO_ENTRY(queue_atomic_write_max_sectors, "atomic_write_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_boundary_sectors,
		"atomic_write_boundary_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_max, "atomic_write_unit_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_atomic_write_unit_min, "atomic_write_unit_min_bytes");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_max_write_zeroes_sectors, "write_zeroes_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_max_zone_append_sectors, "zone_append_max_bytes");
QUEUE_LIM_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_LIM_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_LIM_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_LIM_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_LIM_RW_ENTRY(queue_iostats_passthrough, "iostats_passthrough");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_LIM_RW_ENTRY(queue_wc, "write_cache");
QUEUE_LIM_RO_ENTRY(queue_fua, "fua");
QUEUE_LIM_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_LIM_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_LIM_RO_ENTRY(queue_dma_alignment, "dma_alignment");

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr		= {.name = "hw_sector_size", .mode = 0444 },
	.show_limit	= queue_logical_block_size_show,
};

QUEUE_LIM_RW_ENTRY(queue_rotational, "rotational");
QUEUE_LIM_RW_ENTRY(queue_iostats, "iostats");
QUEUE_LIM_RW_ENTRY(queue_add_random, "add_random");
QUEUE_LIM_RW_ENTRY(queue_stable_writes, "stable_writes");

#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_wb_lat_show(struct gendisk *disk, char *page)
{
	ssize_t ret;
	struct request_queue *q = disk->queue;

	mutex_lock(&disk->rqos_state_mutex);
	if (!wbt_rq_qos(q)) {
		ret = -EINVAL;
		goto out;
	}

	if (wbt_disabled(q)) {
		ret = sysfs_emit(page, "0\n");
		goto out;
	}

	ret = sysfs_emit(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
out:
	mutex_unlock(&disk->rqos_state_mutex);
	return ret;
}

static ssize_t queue_wb_lat_store(struct gendisk *disk, const char *page,
				  size_t count)
{
	struct request_queue *q = disk->queue;
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;
	unsigned int memflags;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	memflags = blk_mq_freeze_queue(q);

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(disk);
		if (ret)
			goto out;
	}

	ret = count;
	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		goto out;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_quiesce_queue(q);

	mutex_lock(&disk->rqos_state_mutex);
	wbt_set_min_lat(q, val);
	mutex_unlock(&disk->rqos_state_mutex);

	blk_mq_unquiesce_queue(q);
out:
	blk_mq_unfreeze_queue(q, memflags);

	return ret;
}

QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

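/*
 * "wbt_lat_usec" semantics, per the conversion above: the value is a target
 * latency in microseconds (kept internally in nanoseconds), writing -1
 * restores wbt_default_latency_nsec(), and a read of "0" means writeback
 * throttling is currently disabled.
 */
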
/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
	/*
	 * Attributes which are protected with q->limits_lock.
	 */
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_max_write_streams_entry.attr,
	&queue_write_stream_granularity_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_max_discard_sectors_entry.attr,
	&queue_max_hw_discard_sectors_entry.attr,
	&queue_atomic_write_max_sectors_entry.attr,
	&queue_atomic_write_boundary_sectors_entry.attr,
	&queue_atomic_write_unit_min_entry.attr,
	&queue_atomic_write_unit_max_entry.attr,
	&queue_max_write_zeroes_sectors_entry.attr,
	&queue_max_zone_append_sectors_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_rotational_entry.attr,
	&queue_zoned_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_iostats_passthrough_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_add_random_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	&queue_ra_entry.attr,

	/*
	 * Attributes which don't require locking.
	 */
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_poll_entry.attr,
	&queue_poll_delay_entry.attr,

	NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
	/*
	 * Attributes which require some form of locking other than
	 * q->sysfs_lock.
	 */
	&elv_iosched_entry.attr,
	&queue_requests_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	/*
	 * Attributes which don't require locking.
	 */
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,

	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);

	if (!entry->show && !entry->show_limit)
		return -EIO;

	if (entry->show_limit) {
		ssize_t res;

		mutex_lock(&disk->queue->limits_lock);
		res = entry->show_limit(disk, page);
		mutex_unlock(&disk->queue->limits_lock);
		return res;
	}

	return entry->show(disk, page);
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!entry->store_limit && !entry->store)
		return -EIO;

	if (entry->store_limit) {
		ssize_t res;

		struct queue_limits lim = queue_limits_start_update(q);

		res = entry->store_limit(disk, page, length, &lim);
		if (res < 0) {
			queue_limits_cancel_update(q);
			return res;
		}

		res = queue_limits_commit_update_frozen(q, &lim);
		if (res)
			return res;
		return length;
	}

	return entry->store(disk, page, length);
}

807 | ||
52cf25d0 | 808 | static const struct sysfs_ops queue_sysfs_ops = { |
8324aa91 JA |
809 | .show = queue_attr_show, |
810 | .store = queue_attr_store, | |
811 | }; | |
812 | ||
4a8d14bb CH |
813 | static const struct attribute_group *blk_queue_attr_groups[] = { |
814 | &queue_attr_group, | |
6d85ebf9 | 815 | &blk_mq_queue_attr_group, |
4a8d14bb CH |
816 | NULL |
817 | }; | |
818 | ||
2bd85221 CH |
819 | static void blk_queue_release(struct kobject *kobj) |
820 | { | |
821 | /* nothing to do here, all data is associated with the parent gendisk */ | |
822 | } | |
823 | ||
5f622417 | 824 | static const struct kobj_type blk_queue_ktype = { |
4a8d14bb | 825 | .default_groups = blk_queue_attr_groups, |
8324aa91 | 826 | .sysfs_ops = &queue_sysfs_ops, |
2bd85221 | 827 | .release = blk_queue_release, |
8324aa91 JA |
828 | }; |
829 | ||
6fc75f30 CH |
830 | static void blk_debugfs_remove(struct gendisk *disk) |
831 | { | |
832 | struct request_queue *q = disk->queue; | |
833 | ||
834 | mutex_lock(&q->debugfs_mutex); | |
835 | blk_trace_shutdown(q); | |
836 | debugfs_remove_recursive(q->debugfs_dir); | |
837 | q->debugfs_dir = NULL; | |
838 | q->sched_debugfs_dir = NULL; | |
839 | q->rqos_debugfs_dir = NULL; | |
840 | mutex_unlock(&q->debugfs_mutex); | |
841 | } | |
842 | ||
2c2086af BVA |
843 | /** |
844 | * blk_register_queue - register a block layer queue with sysfs | |
845 | * @disk: Disk of which the request queue should be registered with sysfs. | |
846 | */ | |
8324aa91 JA |
847 | int blk_register_queue(struct gendisk *disk) |
848 | { | |
8324aa91 | 849 | struct request_queue *q = disk->queue; |
8682b92e | 850 | int ret; |
8324aa91 | 851 | |
2bd85221 CH |
852 | kobject_init(&disk->queue_kobj, &blk_queue_ktype); |
853 | ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue"); | |
cc5c516d | 854 | if (ret < 0) |
2bd85221 | 855 | goto out_put_queue_kobj; |
8324aa91 | 856 | |
40602997 CH |
857 | if (queue_is_mq(q)) { |
858 | ret = blk_mq_sysfs_register(disk); | |
859 | if (ret) | |
2bd85221 | 860 | goto out_put_queue_kobj; |
40602997 | 861 | } |
5cf9c91b CH |
862 | mutex_lock(&q->sysfs_lock); |
863 | ||
85e0cbbb | 864 | mutex_lock(&q->debugfs_mutex); |
2bd85221 | 865 | q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root); |
5cf9c91b | 866 | if (queue_is_mq(q)) |
a8ecdd71 | 867 | blk_mq_debugfs_register(q); |
5cf9c91b | 868 | mutex_unlock(&q->debugfs_mutex); |
a2247f19 | 869 | |
22d0c408 | 870 | ret = disk_register_independent_access_ranges(disk); |
a2247f19 | 871 | if (ret) |
40602997 | 872 | goto out_debugfs_remove; |
a2247f19 | 873 | |
1bf70d08 NS |
874 | ret = blk_crypto_sysfs_register(disk); |
875 | if (ret) | |
876 | goto out_unregister_ia_ranges; | |
877 | ||
1e44bedb ML |
878 | if (queue_is_mq(q)) |
879 | elevator_set_default(q); | |
245618f8 | 880 | wbt_enable_default(disk); |
20f01f16 | 881 | |
cecf5d87 | 882 | blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); |
cecf5d87 ML |
883 | |
884 | /* Now everything is ready and send out KOBJ_ADD uevent */ | |
2bd85221 | 885 | kobject_uevent(&disk->queue_kobj, KOBJ_ADD); |
0546858c | 886 | if (q->elevator) |
cecf5d87 ML |
887 | kobject_uevent(&q->elevator->kobj, KOBJ_ADD); |
888 | mutex_unlock(&q->sysfs_lock); | |
a72c374f ML |
889 | |
890 | /* | |
891 | * SCSI probing may synchronously create and destroy a lot of | |
892 | * request_queues for non-existent devices. Shutting down a fully | |
893 | * functional queue takes measureable wallclock time as RCU grace | |
894 | * periods are involved. To avoid excessive latency in these | |
895 | * cases, a request_queue starts out in a degraded mode which is | |
896 | * faster to shut down and is made fully functional here as | |
897 | * request_queues for non-existent devices never get registered. | |
898 | */ | |
3802f73b YK |
899 | blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); |
900 | percpu_ref_switch_to_percpu(&q->q_usage_counter); | |
a72c374f | 901 | |
a2247f19 DLM |
902 | return ret; |
903 | ||
40602997 | 904 | out_unregister_ia_ranges: |
a2247f19 | 905 | disk_unregister_independent_access_ranges(disk); |
40602997 CH |
906 | out_debugfs_remove: |
907 | blk_debugfs_remove(disk); | |
a2247f19 | 908 | mutex_unlock(&q->sysfs_lock); |
40f2eb9b ZQ |
909 | if (queue_is_mq(q)) |
910 | blk_mq_sysfs_unregister(disk); | |
2bd85221 CH |
911 | out_put_queue_kobj: |
912 | kobject_put(&disk->queue_kobj); | |
b410aff2 | 913 | return ret; |
8324aa91 JA |
914 | } |
915 | ||
2c2086af BVA |
916 | /** |
917 | * blk_unregister_queue - counterpart of blk_register_queue() | |
918 | * @disk: Disk of which the request queue should be unregistered from sysfs. | |
919 | * | |
920 | * Note: the caller is responsible for guaranteeing that this function is called | |
921 | * after blk_register_queue() has finished. | |
922 | */ | |
8324aa91 JA |
923 | void blk_unregister_queue(struct gendisk *disk) |
924 | { | |
925 | struct request_queue *q = disk->queue; | |
926 | ||
fb199746 AM |
927 | if (WARN_ON(!q)) |
928 | return; | |
929 | ||
fa70d2e2 | 930 | /* Return early if disk->queue was never registered. */ |
58c898ba | 931 | if (!blk_queue_registered(q)) |
fa70d2e2 MS |
932 | return; |
933 | ||
667257e8 | 934 | /* |
2c2086af BVA |
935 | * Since sysfs_remove_dir() prevents adding new directory entries |
936 | * before removal of existing entries starts, protect against | |
937 | * concurrent elv_iosched_store() calls. | |
667257e8 | 938 | */ |
e9a823fb | 939 | mutex_lock(&q->sysfs_lock); |
8814ce8a | 940 | blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q); |
cecf5d87 | 941 | mutex_unlock(&q->sysfs_lock); |
02ba8893 | 942 | |
2c2086af BVA |
943 | /* |
944 | * Remove the sysfs attributes before unregistering the queue data | |
945 | * structures that can be modified through sysfs. | |
946 | */ | |
344e9ffc | 947 | if (queue_is_mq(q)) |
8682b92e | 948 | blk_mq_sysfs_unregister(disk); |
450deb93 | 949 | blk_crypto_sysfs_unregister(disk); |
667257e8 | 950 | |
1bf70d08 | 951 | mutex_lock(&q->sysfs_lock); |
a2247f19 | 952 | disk_unregister_independent_access_ranges(disk); |
b89f625e | 953 | mutex_unlock(&q->sysfs_lock); |
0f692882 EB |
954 | |
955 | /* Now that we've deleted all child objects, we can delete the queue. */ | |
2bd85221 CH |
956 | kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE); |
957 | kobject_del(&disk->queue_kobj); | |
2c2086af | 958 | |
824afb9b ML |
959 | if (queue_is_mq(q)) |
960 | elevator_set_none(q); | |
961 | ||
6fc75f30 | 962 | blk_debugfs_remove(disk); |
8324aa91 | 963 | } |