/*
 * Deadline i/o scheduler.
 *
 * Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

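/*
 * Note: read_expire and write_expire above are expressed in jiffies
 * (HZ / 2 is half a second, 5 * HZ is five seconds). All five values are
 * only defaults; they are exported as per-queue sysfs attributes (see the
 * sysfs section at the bottom of this file) and can be changed at run time.
 */
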
struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rq s) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order. read, write or both are NULL
         */
        struct deadline_rq *next_drq[2];
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        mempool_t *drq_pool;
};

/*
 * pre-request data.
 */
struct deadline_rq {
        struct request *request;

        /*
         * expire fifo
         */
        struct list_head fifo;
        unsigned long expires;
};

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)             ((struct deadline_rq *) (rq)->elevator_private)

#define RQ_RB_ROOT(dd, rq)      (&(dd)->sort_list[rq_data_dir((rq))])
#define DRQ_RB_ROOT(dd, drq)    RQ_RB_ROOT((dd), (drq)->request)

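/*
 * Insert a request into the per-direction sort tree. If elv_rb_add()
 * finds an alias (a request already queued at the same sector), that
 * existing request is moved straight to the dispatch queue and the
 * insertion is retried, so the tree never holds two requests with the
 * same key.
 */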
static void
deadline_add_drq_rb(struct deadline_data *dd, struct request *rq)
{
        struct rb_root *root = RQ_RB_ROOT(dd, rq);
        struct request *__alias;

retry:
        __alias = elv_rb_add(root, rq);
        if (unlikely(__alias)) {
                deadline_move_request(dd, RQ_DATA(__alias));
                goto retry;
        }
}

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;
        const int data_dir = rq_data_dir(rq);

        if (dd->next_drq[data_dir] == drq) {
                struct rb_node *rbnext = rb_next(&rq->rb_node);

                dd->next_drq[data_dir] = NULL;
                if (rbnext)
                        dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));
        }

        elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);
        const int data_dir = rq_data_dir(drq->request);

        deadline_add_drq_rb(dd, rq);

        /*
         * set expire time (only used for reads) and add to fifo list
         */
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&drq->fifo);
        deadline_del_drq_rb(dd, drq);
}

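/*
 * Front merge candidate lookup: a bio can be front merged with a request
 * whose start sector equals the bio's end sector, so we search the sort
 * tree (keyed by rq->sector) for exactly that sector and let the elevator
 * core verify that the merge is actually allowed.
 */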
static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t sector = bio->bi_sector + bio_sectors(bio);

                __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
                if (__rq) {
                        BUG_ON(sector != __rq->sector);

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        *req = __rq;
        return ret;
}

static void deadline_merged_request(request_queue_t *q, struct request *req,
                                    int type)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(RQ_RB_ROOT(dd, req), req);
                deadline_add_drq_rb(dd, req);
        }
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
{
        struct deadline_rq *drq = RQ_DATA(req);
        struct deadline_rq *dnext = RQ_DATA(next);

        BUG_ON(!drq);
        BUG_ON(!dnext);

        /*
         * if dnext expires before drq, assign its expire time to drq
         * and move into dnext position (dnext will be deleted) in fifo
         */
        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
                if (time_before(dnext->expires, drq->expires)) {
                        list_move(&drq->fifo, &dnext->fifo);
                        drq->expires = dnext->expires;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;
        const int data_dir = rq_data_dir(rq);
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        dd->next_drq[READ] = NULL;
        dd->next_drq[WRITE] = NULL;

        if (rbnext)
                dd->next_drq[data_dir] = RQ_DATA(rb_entry_rq(rbnext));

        dd->last_sector = drq->request->sector + drq->request->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)    list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo for the given data direction, 1 otherwise.
 * Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * drq is expired!
         */
        if (time_after(jiffies, drq->expires))
                return 1;

        return 0;
}

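/*
 * Dispatch policy in brief: if a batch is in progress and a "next request"
 * exists, keep batching (up to fifo_batch sequential requests). Otherwise
 * pick a data direction: reads are preferred, but writes are serviced once
 * they have been passed over dd->writes_starved times. Within the chosen
 * direction an expired FIFO head is served first; otherwise dispatch
 * continues in sector order, restarting from the lowest sector when there
 * is no next request.
 */
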
/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_drq[WRITE])
                drq = dd->next_drq[WRITE];
        else
                drq = dd->next_drq[READ];

        if (drq) {
                /* we have a "next request" */

                if (dd->last_sector != drq->request->sector)
                        /* end the batch on a non sequential request */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
                drq = list_entry_fifo(dd->fifo_list[data_dir].next);

        } else if (dd->next_drq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                drq = dd->next_drq[data_dir];
        } else {
                struct rb_node *n;

                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
                n = rb_first(&dd->sort_list[data_dir]);
                if (n)
                        drq = RQ_DATA(rb_entry_rq(n));
        }

dispatch_request:
        /*
         * drq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, drq);

        return 1;
}

static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}

static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        mempool_destroy(dd->drq_pool);
        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
        struct deadline_data *dd;

        if (!drq_pool)
                return NULL;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                return NULL;
        memset(dd, 0, sizeof(*dd));

        dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                        mempool_free_slab, drq_pool, q->node);
        if (!dd->drq_pool) {
                kfree(dd);
                return NULL;
        }

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        return dd;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        mempool_free(drq, dd->drq_pool);
        rq->elevator_private = NULL;
}

static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                     gfp_t gfp_mask)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;

        drq = mempool_alloc(dd->drq_pool, gfp_mask);
        if (drq) {
                memset(drq, 0, sizeof(*drq));
                drq->request = rq;

                INIT_LIST_HEAD(&drq->fifo);

                rq->elevator_private = drq;
                return 0;
        }

        return 1;
}

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(elevator_t *e, char *page)                        \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)   \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

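/*
 * The attributes above are exposed per queue under sysfs; on a typical
 * system the path is /sys/block/<dev>/queue/iosched/ ("sda" below is only
 * an example device name, not something defined in this file), so a
 * deadline-managed disk can be retuned at run time with, say:
 *
 *      echo 100 > /sys/block/sda/queue/iosched/read_expire
 *
 * read_expire and write_expire are shown and stored in milliseconds and
 * converted to and from jiffies internally (the __CONV flag above).
 */
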
static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       elv_rb_former_request,
                .elevator_latter_req_fn =       elv_rb_latter_request,
                .elevator_set_req_fn =          deadline_set_request,
                .elevator_put_req_fn =          deadline_put_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },

        .elevator_attrs = deadline_attrs,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        int ret;

        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
                                     0, 0, NULL, NULL);

        if (!drq_pool)
                return -ENOMEM;

        ret = elv_register(&iosched_deadline);
        if (ret)
                kmem_cache_destroy(drq_pool);

        return ret;
}

static void __exit deadline_exit(void)
{
        kmem_cache_destroy(drq_pool);
        elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");