// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
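
/*
 * Helpers shared by most of the show/store methods below: format a single
 * unsigned long, or parse one and return the byte count that sysfs expects
 * on success.
 */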
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}
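
/*
 * nr_requests is only meaningful for blk-mq queues: the store method below
 * rejects writes on queues without a tag set and clamps the value to at
 * least BLKDEV_MIN_RQ before resizing the tag sets.
 */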
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
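
/*
 * read_ahead_kb is backed by bdi->ra_pages, which counts pages; shifting
 * by PAGE_SHIFT - 10 converts between pages and kibibytes.
 */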
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}
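
/*
 * Writes to discard_max_bytes must be aligned to discard_granularity and
 * are capped at the hardware limit; the value is carried internally in
 * 512-byte sectors, hence the shift by 9.
 */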
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}
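
/*
 * max_sectors_kb is writable but clamped below: it cannot drop under one
 * page worth of sectors, nor exceed the hardware limit (further capped by
 * max_dev_sectors when the device sets one).
 */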
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}
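
/*
 * Generate a show/store pair for a boolean queue flag.  When @neg is set,
 * the value exposed through sysfs is the inverse of the flag's internal
 * sense (e.g. "rotational" reads as 1 when QUEUE_FLAG_NONROT is clear).
 */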
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}
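
/*
 * For illustration, QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1) expands to a
 * queue_nonrot_show()/queue_nonrot_store() pair operating on
 * QUEUE_FLAG_NONROT with the exposed value inverted.
 */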
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}
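
/*
 * nomerges encodes two flags: writing 2 disables all merging
 * (QUEUE_FLAG_NOMERGES), 1 disables only the extended merge lookups
 * (QUEUE_FLAG_NOXMERGES), and 0 re-enables merging.
 */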
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
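
/*
 * rq_affinity: 0 disables completion steering, 1 completes a request on a
 * CPU in the submitting CPU's group (QUEUE_FLAG_SAME_COMP), and 2 forces
 * completion on the exact submitting CPU (QUEUE_FLAG_SAME_FORCE).
 */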
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
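
/*
 * io_poll_delay: -1 (BLK_MQ_POLL_CLASSIC) selects classic busy polling,
 * while a value >= 0 is a hybrid-poll sleep time in microseconds, stored
 * internally in nanoseconds.
 */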
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on) {
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	} else {
		blk_mq_freeze_queue(q);
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}
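
/*
 * wbt_lat_usec: target minimum latency for writeback throttling, in
 * microseconds.  Writing -1 restores the device-type default, and any
 * change freezes and quiesces the queue since it may enable or disable
 * wbt entirely.
 */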
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}
569 | #define QUEUE_RO_ENTRY(_prefix, _name) \ |
570 | static struct queue_sysfs_entry _prefix##_entry = { \ | |
571 | .attr = { .name = _name, .mode = 0444 }, \ | |
572 | .show = _prefix##_show, \ | |
573 | }; | |
574 | ||
575 | #define QUEUE_RW_ENTRY(_prefix, _name) \ | |
576 | static struct queue_sysfs_entry _prefix##_entry = { \ | |
577 | .attr = { .name = _name, .mode = 0644 }, \ | |
578 | .show = _prefix##_show, \ | |
579 | .store = _prefix##_store, \ | |
580 | }; | |
581 | ||
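
/*
 * For example, QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb") declares
 * queue_ra_entry with mode 0644, dispatching to queue_ra_show() and
 * queue_ra_store().
 */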
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
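
/*
 * All attributes that may appear under /sys/block/<disk>/queue/;
 * queue_attr_visible() below hides the ones that do not apply to a
 * given queue.
 */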
static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};
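
/*
 * Common show/store dispatchers for the "queue" kobject; q->sysfs_lock
 * serializes them against attribute changes made elsewhere.
 */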
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue to be
 * synchronous, it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);
	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			mutex_unlock(&q->sysfs_dir_lock);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}