// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
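
/*
 * Helpers shared by most attribute handlers below: queue_var_show() formats
 * a single unsigned long into the page buffer, and queue_var_store() parses
 * one, rejecting values that do not fit in an unsigned int.  On success it
 * returns the byte count consumed, so store handlers can return its result
 * directly.
 */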
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}
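
/*
 * nr_requests: depth of the per-queue request pool.  The store side only
 * applies to blk-mq queues and silently raises values below BLKDEV_MIN_RQ.
 */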
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
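
/*
 * Example (from userspace; the device name sda is hypothetical):
 *   echo 256 > /sys/block/sda/queue/nr_requests
 */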
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
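
/*
 * read_ahead_kb is backed by bdi->ra_pages, which is kept in units of
 * PAGE_SIZE pages; the shifts by (PAGE_SHIFT - 10) above convert between
 * pages and KiB.
 */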
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
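
/*
 * Note that the granularity masking above assumes discard_granularity is a
 * power of two, and that writes exceeding the hardware limit are clamped to
 * max_hw_discard_sectors rather than rejected.
 */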
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}
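
/*
 * max_sectors_kb: the *_kb values are converted to and from 512-byte
 * sectors with a one-bit shift (1 KiB == 2 sectors).  Writing 0 clears the
 * user limit and falls back to min(hardware limit, BLK_DEF_MAX_SECTORS).
 */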
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long var;
	unsigned int max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&var, page, count);

	if (ret < 0)
		return ret;

	max_sectors_kb = (unsigned int)var;
	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
					 q->limits.max_dev_sectors >> 1);
	if (max_sectors_kb == 0) {
		q->limits.max_user_sectors = 0;
		max_sectors_kb = min(max_hw_sectors_kb,
				     BLK_DEF_MAX_SECTORS >> 1);
	} else {
		if (max_sectors_kb > max_hw_sectors_kb ||
		    max_sectors_kb < page_kb)
			return -EINVAL;
		q->limits.max_user_sectors = max_sectors_kb << 1;
	}

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}
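
/*
 * Example (hypothetical disk sda): cap request size at 512 KiB with
 *   echo 512 > /sys/block/sda/queue/max_sectors_kb
 * Values above max_hw_sectors_kb or below the page size fail with -EINVAL.
 */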
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}
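
/*
 * QUEUE_SYSFS_BIT_FNS() generates a queue_<name>_show()/queue_<name>_store()
 * pair backed by a single queue flag; 'neg' inverts the exposed polarity,
 * e.g. "rotational" below is the negation of QUEUE_FLAG_NONROT.
 */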
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
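
/*
 * nomerges encoding: 0 allows all merging, 1 disables the more expensive
 * merge lookups while keeping the simple one-hit cache merges
 * (QUEUE_FLAG_NOXMERGES), and 2 disables merging entirely
 * (QUEUE_FLAG_NOMERGES).  The show side encodes the two flags back to the
 * same 0/1/2 values.
 */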
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
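
/*
 * rq_affinity: 0 places no restriction on the completion CPU, 1 completes
 * requests on a CPU in the same group as the submitter
 * (QUEUE_FLAG_SAME_COMP), and 2 forces completion on the exact submitting
 * CPU (QUEUE_FLAG_SAME_FORCE).  On !CONFIG_SMP builds the store always
 * fails with -EINVAL.
 */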
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%d\n", -1);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				      size_t count)
{
	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				      size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q->disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}
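
/*
 * wbt_lat_usec is exposed in microseconds but kept internally in
 * nanoseconds, hence the *1000 on store and the div_u64(..., 1000) on show.
 * Writing -1 restores the default latency target for the device type.
 */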
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}
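
/*
 * write_cache accepts the strings "write back" or "write through" ("none"
 * is treated as write through).  This only toggles QUEUE_FLAG_WC, i.e. what
 * the block layer assumes about the device cache; it does not reconfigure
 * the device itself.
 */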
static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};
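
/*
 * For example, QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") expands to a
 * queue_ra_entry attribute with mode 0644 wired to queue_ra_show() and
 * queue_ra_store().
 */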
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};
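
/*
 * Some attributes only make sense for certain queues: hide io_timeout when
 * the driver has no ->timeout handler, and hide the zone limits on queues
 * that are not zoned.
 */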
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
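
/*
 * Both paths hold q->sysfs_lock around the handler, so individual show and
 * store callbacks are serialized against each other and against the
 * registration paths below that take the same lock.
 */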
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	NULL
};

static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}

static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};

static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);
	blk_throtl_register(disk);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}