/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

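/*
 * queue_var_show()/queue_var_store() handle the common case for the
 * attributes below: a single unsigned long, shown and parsed in decimal.
 * Stores reject values that do not fit in an unsigned int and, per the
 * usual sysfs convention, return the full write length on success.
 */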
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

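/*
 * "nr_requests" resizes the request pool.  Values below BLKDEV_MIN_RQ
 * are rounded up, and the update is routed to either the legacy
 * request_fn path or to blk-mq, whichever the queue uses.
 */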
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

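/*
 * "read_ahead_kb" is stored internally in pages; PAGE_SHIFT - 10
 * converts between pages and kilobytes (a page is at least 1K).
 */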
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

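/*
 * "discard_max_bytes" must be a multiple of the discard granularity
 * (the alignment test below assumes the granularity is a power of two)
 * and is capped at the hardware limit, max_hw_discard_sectors.
 */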
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

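/*
 * "max_sectors_kb" must lie between the page size and the (possibly
 * device-capped) hardware limit.  bdi->io_pages is kept in sync so
 * that readahead can be capped at the new request size.
 */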
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

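/*
 * QUEUE_SYSFS_BIT_FNS() generates a show/store pair for a single queue
 * flag.  With neg set, the exported value is inverted: "rotational" is
 * the negation of QUEUE_FLAG_NONROT.
 */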
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \
static ssize_t \
queue_show_##name(struct request_queue *q, char *page) \
{ \
	int bit; \
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
	return queue_var_show(neg ? !bit : bit, page); \
} \
static ssize_t \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{ \
	unsigned long val; \
	ssize_t ret; \
	ret = queue_var_store(&val, page, count); \
	if (ret < 0) \
		return ret; \
	if (neg) \
		val = !val; \
 \
	spin_lock_irq(q->queue_lock); \
	if (val) \
		queue_flag_set(QUEUE_FLAG_##flag, q); \
	else \
		queue_flag_clear(QUEUE_FLAG_##flag, q); \
	spin_unlock_irq(q->queue_lock); \
	return ret; \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

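/*
 * "nomerges": 0 leaves merging enabled, 1 disables the more expensive
 * extended merge lookups (NOXMERGES), 2 disables merging entirely
 * (NOMERGES).  The show side packs both flags back into that value.
 */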
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

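/*
 * "rq_affinity": 0 allows completion on any CPU, 1 steers completions
 * to the submitting CPU's group (SAME_COMP), 2 forces completion on
 * the exact submitting CPU (SAME_FORCE).
 */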
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

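/*
 * "io_poll_delay": -1 selects classic busy polling; other values set a
 * hybrid-poll sleep time, exposed in usecs but stored in nsecs (0 lets
 * blk-mq estimate the sleep time adaptively).
 */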
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				      size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

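/*
 * "wbt_lat_usec" configures writeback throttling.  Writing -1 restores
 * the device-type default latency target; any non-negative value (in
 * usecs) sets the target explicitly and marks wbt as manually enabled.
 */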
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!q->rq_wb)
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_wb *rwb;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rwb = q->rq_wb;
	if (!rwb) {
		ret = wbt_init(q);
		if (ret)
			return ret;

		rwb = q->rq_wb;
		if (!rwb)
			return -EINVAL;
	}

	if (val == -1)
		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
	else if (val >= 0)
		rwb->min_lat_nsec = val * 1000ULL;

	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->enable_state = WBT_STATE_ON_MANUAL;

	wbt_update_limits(rwb);
	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

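/*
 * The queue_sysfs_entry structures below bind the attribute names that
 * appear under /sys/block/<dev>/queue to the show/store helpers above.
 */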
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = S_IRUGO },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = S_IRUGO },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

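/*
 * queue_attr_show()/queue_attr_store() serialize against queue teardown
 * via sysfs_lock and refuse access once the queue is dying.
 */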
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);
	bdi_put(q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		ioc_clear_queue(q);
		elevator_exit(q, q->elevator);
	}

	blk_free_queue_stats(q->stats);

	blk_exit_rl(q, &q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show = queue_attr_show,
	.store = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops = &queue_sysfs_ops,
	.default_attrs = default_attrs,
	.release = blk_release_queue,
};

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (q->mq_ops) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			goto unlock;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	queue_flag_clear_unlocked(QUEUE_FLAG_REGISTERED, q);

	wbt_exit(q);

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}