/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"

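/*
 * Each queue attribute is described by a queue_sysfs_entry: the name and
 * mode it gets under /sys/block/<dev>/queue/ plus optional show/store
 * handlers that format or parse the value for that request queue.
 */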
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

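/*
 * "nr_requests": request allocation depth of the queue.  Writes resize the
 * depth via blk_update_nr_requests() (legacy path) or
 * blk_mq_update_nr_requests() (blk-mq).
 */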
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

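/* "read_ahead_kb": readahead window of the backing_dev_info, in KB. */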
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

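/*
 * Read-only views of assorted queue limits.  Sector-based limits are
 * converted from 512-byte sectors to KB (>> 1); the rest are reported in
 * their native units (bytes or counts).
 */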
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

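/*
 * Discard and WRITE SAME limits.  The *_bytes attributes report sectors
 * converted to bytes (<< 9); discard_max_bytes is the only writable one
 * and is clamped to the hardware limit in queue_discard_max_store().
 */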
static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

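/*
 * "max_sectors_kb" store: the new soft limit must be at least one page and
 * no larger than the hardware (and, if set, the device) limit.
 */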
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

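/*
 * Generate show/store handlers for simple boolean queue flags.  "neg"
 * inverts the exported value (e.g. "rotational" is the negation of
 * QUEUE_FLAG_NONROT).
 */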
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

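/*
 * "nomerges": 0 = all merging enabled, 1 = only simple one-hit merges,
 * 2 = no merging at all.
 */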
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

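/*
 * "rq_affinity": 0 = complete anywhere, 1 = complete on a CPU in the same
 * group as the submitter, 2 = force completion on the submitting CPU.
 */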
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

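/*
 * "io_poll": enable or disable polled completions.  Writes are only
 * accepted for blk-mq queues whose driver implements ->poll.
 */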
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (poll_on)
		queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		queue_flag_clear(QUEUE_FLAG_POLL, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

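/*
 * "write_cache": report whether the queue is treated as write back or
 * write through; writing either string sets or clears QUEUE_FLAG_WC.
 */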
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	if (set)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	spin_unlock_irq(q->queue_lock);

	return count;
}

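/* "dax": whether the device supports direct access (DAX). */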
static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

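/*
 * Attribute descriptors: these tie the names visible under
 * /sys/block/<dev>/queue/ to the show/store handlers above.
 */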
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = S_IRUGO },
	.show = queue_dax_show,
};

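/* All attributes published for every request queue. */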
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_dax_entry.attr,
	NULL,
};

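/*
 * Common show/store wrappers: serialize against q->sysfs_lock and refuse
 * access once the queue is dying.
 */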
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

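/* Final free of the request_queue, deferred until after an RCU grace period. */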
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	bdi_exit(&q->backing_dev_info);
	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops)
		blk_free_flush_queue(q->fq);
	else
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (q->bio_split)
		bioset_free(q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

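/* kobject glue for the "queue" directory of a block device. */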
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

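/*
 * Create the "queue" kobject under the disk's device, register blk-mq and
 * elevator attributes, and announce the new directory with a KOBJ_ADD uevent.
 */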
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices.  Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved.  To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (q->mq_ops)
		blk_mq_register_dev(dev, q);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

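/* Tear down everything blk_register_queue() set up, in reverse order. */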
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}