/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-wbt.h"

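/*
 * One of these entries backs each file in /sys/block/<dev>/queue: a
 * sysfs attribute (name and mode) plus optional show/store callbacks
 * operating on the owning request_queue.
 */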
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

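/*
 * Illustrative use of the parser above (example values, not part of
 * the original file):
 *
 *	unsigned long nr;
 *	ssize_t ret = queue_var_store(&nr, "128\n", 4);
 *
 * On success ret == 4 (the byte count) and nr == 128; non-numeric
 * input or a value above UINT_MAX yields -EINVAL.
 */
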
static ssize_t queue_var_store64(u64 *var, const char *page)
{
	int err;
	u64 v;

	err = kstrtou64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

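/*
 * Example (illustrative): "echo 256 > /sys/block/<dev>/queue/nr_requests"
 * ends up in queue_requests_store() above, which clamps the value to at
 * least BLKDEV_MIN_RQ and applies it through the legacy or blk-mq path.
 */
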
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

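/*
 * read_ahead_kb is stored internally as a page count: with 4K pages,
 * PAGE_SHIFT - 10 == 2, so e.g. 128 KB of readahead becomes 32 pages.
 */
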
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

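/*
 * Note the order of checks above: the byte value must be aligned to
 * discard_granularity before it is converted to 512-byte sectors
 * (">>= 9") and capped at the hardware limit max_hw_discard_sectors.
 */
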
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

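/*
 * max_sectors_kb is only accepted between one page and the effective
 * hardware limit (max_hw_sectors, further capped by max_dev_sectors),
 * and is stored back in 512-byte sectors ("<< 1" converts KB to sectors).
 */
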
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

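/*
 * For example, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to
 * queue_show_nonrot()/queue_store_nonrot() operating on
 * QUEUE_FLAG_NONROT; a "neg" argument of 1 means the sysfs file
 * (here "rotational") exposes the inverted sense of the flag.
 */
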
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

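/*
 * nomerges is effectively a two-bit value: 0 enables all merging,
 * 1 disables only the more expensive lookup-based merges (NOXMERGES),
 * and 2 disables merging entirely (NOMERGES).
 */
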
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

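/*
 * rq_affinity reads back as 0 (no completion steering), 1 (complete on
 * a CPU in the same group as the submitter, SAME_COMP) or 2 (force
 * completion on the exact submitting CPU, SAME_FORCE).
 */
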
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}

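/*
 * io_poll_delay is exposed in microseconds but kept in nanoseconds
 * internally: -1 selects classic busy polling, 0 lets the kernel pick
 * an adaptive hybrid-polling delay, and a positive value requests a
 * fixed delay.
 */
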
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!q->rq_wb)
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	ssize_t ret;
	u64 val;

	if (!q->rq_wb)
		return -EINVAL;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;

	q->rq_wb->min_lat_nsec = val * 1000ULL;
	wbt_update_limits(q->rq_wb);
	return count;
}

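/*
 * wbt_lat_usec is the writeback-throttling latency target, exposed in
 * microseconds and stored in nanoseconds; a store also re-runs
 * wbt_update_limits() so the new target takes effect immediately.
 */
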
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
{
	return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
			pre, (long long) stat->nr_samples,
			(long long) stat->mean, (long long) stat->min,
			(long long) stat->max);
}

static ssize_t queue_stats_show(struct request_queue *q, char *page)
{
	struct blk_rq_stat stat[2];
	ssize_t ret;

	blk_queue_stat_get(q, stat);

	ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
	ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
	return ret;
}

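/*
 * Example output of the "stats" file (values illustrative only):
 *
 *	read : samples=42, mean=123456, min=1000, max=900000
 *	write: samples=17, mean=654321, min=2000, max=800000
 */

/*
 * The attribute table below ties each file name and mode to the
 * show/store handlers defined above.
 */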
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = S_IRUGO },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = S_IRUGO },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_stats_entry = {
	.attr = {.name = "stats", .mode = S_IRUGO },
	.show = queue_stats_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	&queue_stats_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

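/*
 * Both dispatchers above serialize against q->sysfs_lock and bail out
 * with -ENOENT once the queue is dying, so the individual show/store
 * handlers can assume the queue stays valid while they run.
 */
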
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	wbt_exit(q);
	bdi_exit(&q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops)
		blk_free_flush_queue(q->fq);
	else
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

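/*
 * The queue memory itself is freed through call_rcu() (see
 * blk_free_queue_rcu() above), so lockless readers traversing under
 * rcu_read_lock() never see it disappear underneath them.
 */
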
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

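/*
 * Writeback throttling is set up only when the matching config option
 * for the queue type is enabled: CONFIG_BLK_WBT_MQ for blk-mq queues
 * and CONFIG_BLK_WBT_SQ for legacy request_fn queues.
 */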
static void blk_wb_init(struct request_queue *q)
{
#ifndef CONFIG_BLK_WBT_MQ
	if (q->mq_ops)
		return;
#endif
#ifndef CONFIG_BLK_WBT_SQ
	if (q->request_fn)
		return;
#endif

	/*
	 * If this fails, we don't get throttling
	 */
	wbt_init(q);
}

int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_dev(dev, q);

	blk_wb_init(q);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}