/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

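/*
 * One entry per sysfs attribute. Rather than exposing raw kobject
 * callbacks, each attribute carries show/store hooks typed on the
 * owning request_queue; queue_attr_show() and queue_attr_store()
 * below perform the kobject-to-queue translation.
 */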
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

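/*
 * Helpers shared by most attributes: values are shown as decimal
 * unsigned longs, and stores parse decimal input. Note that
 * queue_var_store() always consumes the whole input and returns
 * 'count'; unparseable input simply yields 0.
 */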
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

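/*
 * Changing nr_requests resizes the request list at run time. The new
 * depth is clamped to BLKDEV_MIN_RQ, the congestion on/off thresholds
 * are recomputed, and the congested/full state of both the sync and
 * async lists is re-evaluated so that writers sleeping on a now-larger
 * queue get woken up.
 */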
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else if (rl->count[BLK_RW_SYNC] + 1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else if (rl->count[BLK_RW_ASYNC] + 1 <= q->nr_requests) {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, page);
}

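/*
 * read_ahead_kb is stored internally as a page count. Shifting by
 * (PAGE_CACHE_SHIFT - 10) converts pages to KB and back, e.g. with
 * 4K pages: 32 pages << 2 == 128 KB.
 */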
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_discard_sectors << 9, page);
}

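/*
 * max_sectors_kb may be lowered at most to one page and raised at most
 * to the hardware limit reported in max_hw_sectors_kb; anything outside
 * that range is rejected with -EINVAL.
 */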
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

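/*
 * The sysfs file is named "rotational", which is the logical inverse
 * of QUEUE_FLAG_NONROT: hence the negation in the show method and the
 * swapped set/clear in the store method.
 */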
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
	return queue_var_show(!blk_queue_nonrot(q), page);
}

static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_set(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

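/*
 * "nomerges": when set, the block layer stops trying to merge
 * contiguous requests, which can save CPU on devices that do not
 * benefit from merging.
 */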
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nomerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else
		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

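/*
 * "rq_affinity" (QUEUE_FLAG_SAME_COMP) asks the block layer to complete
 * a request on the CPU that submitted it. The store method is compiled
 * out when generic SMP helpers are unavailable, in which case writes
 * fail with -EINVAL.
 */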
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

	return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_io_stat(q), page);
}

static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
				   size_t count)
{
	unsigned long stats;
	ssize_t ret = queue_var_store(&stats, page, count);

	spin_lock_irq(q->queue_lock);
	if (stats)
		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
	else
		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

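/*
 * The attribute definitions below become files under
 * /sys/block/<disk>/queue/ once blk_register_queue() has run, e.g.
 * (device name and values are illustrative):
 *
 *   $ cat /sys/block/sda/queue/nr_requests
 *   128
 *   # echo 256 > /sys/block/sda/queue/nr_requests
 */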
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nonrot_show,
	.store = queue_nonrot_store,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_iostats_show,
	.store = queue_iostats_store,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

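/*
 * Generic show/store dispatch: sysfs hands us the raw kobject and
 * attribute, container_of() recovers the request_queue and the entry,
 * and q->sysfs_lock serializes attribute access against queue teardown.
 * A queue already marked QUEUE_FLAG_DEAD reports -ENOENT.
 */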
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request(). It is the kobject release method and runs
 *     when the last reference to the queue is dropped; typically when a
 *     block device is being de-registered. Currently, its primary task is
 *     to free all the &struct request structures that were allocated to
 *     the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}

static struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

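/*
 * Registration order matters: blktrace sysfs state is set up first,
 * then the "queue" kobject is added under the disk's device kobject
 * (pinned with kobject_get()) and announced via uevent. Elevator
 * attributes only exist for request_fn-based queues; if registering
 * them fails, everything done so far is unwound.
 */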
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(disk_to_dev(disk));
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}