/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

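/*
 * Helpers shared by the show/store methods below: queue_var_show()
 * formats a single unsigned value for sysfs, and queue_var_store()
 * parses one back.  Note that queue_var_store() always reports the
 * full buffer as consumed and does no error checking on the input.
 */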
static ssize_t
queue_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

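/*
 * Writing nr_requests resizes the request allocation limit and then
 * re-evaluates congestion and "queue full" state for both the READ and
 * WRITE request lists, waking any sleepers if the queue is no longer
 * full at the new limit.
 */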
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[READ] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, READ);
	else if (rl->count[READ] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, READ);

	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, WRITE);
	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, WRITE);

	if (rl->count[READ] >= q->nr_requests) {
		blk_set_queue_full(q, READ);
	} else if (rl->count[READ] + 1 <= q->nr_requests) {
		blk_clear_queue_full(q, READ);
		wake_up(&rl->wait[READ]);
	}

	if (rl->count[WRITE] >= q->nr_requests) {
		blk_set_queue_full(q, WRITE);
	} else if (rl->count[WRITE] + 1 <= q->nr_requests) {
		blk_clear_queue_full(q, WRITE);
		wake_up(&rl->wait[WRITE]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, page);
}

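/*
 * read_ahead_kb is stored internally in pages; shifting by
 * (PAGE_CACHE_SHIFT - 10) converts between kilobytes and pages, since
 * a page is 2^(PAGE_CACHE_SHIFT - 10) kilobytes.
 */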
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = q->max_sectors >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->hardsect_size, page);
}

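/*
 * max_sectors_kb may only be set between one page and the hardware
 * limit (max_hw_sectors_kb); anything outside that range is rejected
 * with -EINVAL.  The >> 1 and << 1 shifts convert between 512-byte
 * sectors and kilobytes.
 */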
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = q->max_hw_sectors >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = q->max_hw_sectors >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

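/*
 * This attribute is exposed to userspace as "rotational", so its sense
 * is the inverse of the internal QUEUE_FLAG_NONROT flag: writing 0
 * marks the device as non-rotational (e.g. an SSD), writing non-zero
 * clears that.
 */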
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
	return queue_var_show(!blk_queue_nonrot(q), page);
}

static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_clear(QUEUE_FLAG_NONROT, q);
	else
		queue_flag_set(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

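/*
 * When nomerges is set, the I/O schedulers skip the merge lookups that
 * would normally try to combine a new request with ones already queued.
 */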
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nomerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	if (nm)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else
		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

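/*
 * rq_affinity controls QUEUE_FLAG_SAME_COMP, which asks the block
 * layer to complete a request on the CPU that submitted it.  The store
 * is only functional when the generic SMP helpers are available; on
 * other configurations it fails with -EINVAL.
 */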
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

	return queue_var_show(set != 0, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_hw_sector_size_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nonrot_show,
	.store = queue_nonrot_store,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	NULL,
};

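/*
 * The attributes above appear as files under /sys/block/<disk>/queue/.
 * For example (device name hypothetical, value illustrative):
 *
 *	# cat /sys/block/sda/queue/max_sectors_kb
 *	512
 *	# echo 128 > /sys/block/sda/queue/nr_requests
 */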
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

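/*
 * queue_attr_show() and queue_attr_store() are the generic sysfs entry
 * points: they resolve the attribute back to its queue_sysfs_entry,
 * serialize against q->sysfs_lock, and refuse access with -ENOENT once
 * the queue has been marked QUEUE_FLAG_DEAD.
 */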
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobject belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}

static struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

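/*
 * blk_register_queue() adds the "queue" directory beneath the disk's
 * device kobject and registers the elevator's sysfs entries.  Stacked
 * devices with no request_fn (make_request-based drivers) get no queue
 * directory and simply return success.
 */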
int blk_register_queue(struct gendisk *disk)
{
	int ret;

	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	if (!q->request_fn)
		return 0;

	ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
			  "%s", "queue");
	if (ret < 0)
		return ret;

	kobject_uevent(&q->kobj, KOBJ_ADD);

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn) {
		elv_unregister_queue(q);

		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		kobject_put(&disk_to_dev(disk)->kobj);
	}
}