cfq-iosched: fix merge error
[linux-2.6-block.git] / block / cfq-iosched.c
1da177e4 1/*
1da177e4
LT
2 * CFQ, or complete fairness queueing, disk scheduler.
3 *
4 * Based on ideas from a previously unfinished io
5 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6 *
0fe23479 7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
1da177e4 8 */
1da177e4 9#include <linux/module.h>
1cc9be68
AV
10#include <linux/blkdev.h>
11#include <linux/elevator.h>
1da177e4 12#include <linux/rbtree.h>
22e2c507 13#include <linux/ioprio.h>
7b679138 14#include <linux/blktrace_api.h>
1da177e4
LT
15
16/*
17 * tunables
18 */
fe094d98
JA
19/* max queue in one round of service */
20static const int cfq_quantum = 4;
64100099 21static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
fe094d98
JA
22/* maximum backwards seek, in KiB */
23static const int cfq_back_max = 16 * 1024;
24/* penalty of a backwards seek */
25static const int cfq_back_penalty = 2;
64100099 26static const int cfq_slice_sync = HZ / 10;
3b18152c 27static int cfq_slice_async = HZ / 25;
64100099 28static const int cfq_slice_async_rq = 2;
caaa5f9f 29static int cfq_slice_idle = HZ / 125;
5db5d642
CZ
30static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
31static const int cfq_hist_divisor = 4;
22e2c507 32
d9e7620e 33/*
0871714e 34 * offset from end of service tree
d9e7620e 35 */
0871714e 36#define CFQ_IDLE_DELAY (HZ / 5)
d9e7620e
JA
37
38/*
39 * below this threshold, we consider thinktime immediate
40 */
41#define CFQ_MIN_TT (2)
42
e6c5bc73
JM
43/*
44 * Allow merged cfqqs to perform this amount of seeky I/O before
45 * deciding to break the queues up again.
46 */
47#define CFQQ_COOP_TOUT (HZ)
48
22e2c507 49#define CFQ_SLICE_SCALE (5)
45333d5a 50#define CFQ_HW_QUEUE_MIN (5)
22e2c507 51
fe094d98
JA
52#define RQ_CIC(rq) \
53 ((struct cfq_io_context *) (rq)->elevator_private)
7b679138 54#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
1da177e4 55
e18b890b
CL
56static struct kmem_cache *cfq_pool;
57static struct kmem_cache *cfq_ioc_pool;
1da177e4 58
245b2e70 59static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
334e94de 60static struct completion *ioc_gone;
9a11b4ed 61static DEFINE_SPINLOCK(ioc_gone_lock);
334e94de 62
22e2c507
JA
63#define CFQ_PRIO_LISTS IOPRIO_BE_NR
64#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
22e2c507
JA
65#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
66
206dc69b
JA
67#define sample_valid(samples) ((samples) > 80)
68
cc09e299
JA
69/*
70 * Most of our rbtree usage is for sorting with min extraction, so
71 * if we cache the leftmost node we don't have to walk down the tree
72 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
73 * move this into the elevator for the rq sorting as well.
74 */
75struct cfq_rb_root {
76 struct rb_root rb;
77 struct rb_node *left;
aa6f6a3d 78 unsigned count;
cc09e299 79};
aa6f6a3d 80#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, 0, }
cc09e299 81
6118b70b
JA
82/*
83 * Per process-grouping structure
84 */
85struct cfq_queue {
86 /* reference count */
87 atomic_t ref;
88 /* various state flags, see below */
89 unsigned int flags;
90 /* parent cfq_data */
91 struct cfq_data *cfqd;
92 /* service_tree member */
93 struct rb_node rb_node;
94 /* service_tree key */
95 unsigned long rb_key;
96 /* prio tree member */
97 struct rb_node p_node;
98 /* prio tree root we belong to, if any */
99 struct rb_root *p_root;
100 /* sorted list of pending requests */
101 struct rb_root sort_list;
102 /* if fifo isn't expired, next request to serve */
103 struct request *next_rq;
104 /* requests queued in sort_list */
105 int queued[2];
106 /* currently allocated requests */
107 int allocated[2];
108 /* fifo list of requests in sort_list */
109 struct list_head fifo;
110
111 unsigned long slice_end;
112 long slice_resid;
113 unsigned int slice_dispatch;
114
115 /* pending metadata requests */
116 int meta_pending;
117 /* number of requests that are on the dispatch list or inside driver */
118 int dispatched;
119
120 /* io prio of this group */
121 unsigned short ioprio, org_ioprio;
122 unsigned short ioprio_class, org_ioprio_class;
123
b2c18e1e
JM
124 unsigned int seek_samples;
125 u64 seek_total;
126 sector_t seek_mean;
127 sector_t last_request_pos;
e6c5bc73 128 unsigned long seeky_start;
b2c18e1e 129
6118b70b 130 pid_t pid;
df5fe3e8 131
aa6f6a3d 132 struct cfq_rb_root *service_tree;
df5fe3e8 133 struct cfq_queue *new_cfqq;
6118b70b
JA
134};
135
c0324a02 136/*
718eee05 137 * First index in the service_trees.
c0324a02
CZ
138 * IDLE is handled separately, so it has a negative index
139 */
140enum wl_prio_t {
141 IDLE_WORKLOAD = -1,
142 BE_WORKLOAD = 0,
143 RT_WORKLOAD = 1
144};
145
718eee05
CZ
146/*
147 * Second index in the service_trees.
148 */
149enum wl_type_t {
150 ASYNC_WORKLOAD = 0,
151 SYNC_NOIDLE_WORKLOAD = 1,
152 SYNC_WORKLOAD = 2
153};
154
155
22e2c507
JA
156/*
157 * Per block device queue structure
158 */
1da177e4 159struct cfq_data {
165125e1 160 struct request_queue *queue;
22e2c507
JA
161
162 /*
c0324a02
CZ
163 * rr lists of queues with requests, one rr for each priority class.
164 * Counts are embedded in the cfq_rb_root
165 */
718eee05 166 struct cfq_rb_root service_trees[2][3];
c0324a02
CZ
167 struct cfq_rb_root service_tree_idle;
168 /*
169 * The priority currently being served
22e2c507 170 */
c0324a02 171 enum wl_prio_t serving_prio;
718eee05
CZ
172 enum wl_type_t serving_type;
173 unsigned long workload_expires;
a36e71f9
JA
174
175 /*
176 * Each priority tree is sorted by next_request position. These
177 * trees are used when determining if two or more queues are
178 * interleaving requests (see cfq_close_cooperator).
179 */
180 struct rb_root prio_trees[CFQ_PRIO_LISTS];
181
22e2c507 182 unsigned int busy_queues;
5db5d642 183 unsigned int busy_queues_avg[2];
22e2c507 184
5ad531db 185 int rq_in_driver[2];
3ed9a296 186 int sync_flight;
45333d5a
AC
187
188 /*
189 * queue-depth detection
190 */
191 int rq_queued;
25776e35 192 int hw_tag;
45333d5a
AC
193 int hw_tag_samples;
194 int rq_in_driver_peak;
1da177e4 195
22e2c507
JA
196 /*
197 * idle window management
198 */
199 struct timer_list idle_slice_timer;
23e018a1 200 struct work_struct unplug_work;
1da177e4 201
22e2c507
JA
202 struct cfq_queue *active_queue;
203 struct cfq_io_context *active_cic;
22e2c507 204
c2dea2d1
VT
205 /*
206 * async queue for each priority case
207 */
208 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
209 struct cfq_queue *async_idle_cfqq;
15c31be4 210
6d048f53 211 sector_t last_position;
1da177e4 212
1da177e4
LT
213 /*
214 * tunables, see top of file
215 */
216 unsigned int cfq_quantum;
22e2c507 217 unsigned int cfq_fifo_expire[2];
1da177e4
LT
218 unsigned int cfq_back_penalty;
219 unsigned int cfq_back_max;
22e2c507
JA
220 unsigned int cfq_slice[2];
221 unsigned int cfq_slice_async_rq;
222 unsigned int cfq_slice_idle;
963b72fc 223 unsigned int cfq_latency;
d9ff4187
AV
224
225 struct list_head cic_list;
1da177e4 226
6118b70b
JA
227 /*
228 * Fallback dummy cfqq for extreme OOM conditions
229 */
230 struct cfq_queue oom_cfqq;
365722bb
VG
231
232 unsigned long last_end_sync_rq;
1da177e4
LT
233};
234
c0324a02 235static struct cfq_rb_root *service_tree_for(enum wl_prio_t prio,
718eee05 236 enum wl_type_t type,
c0324a02
CZ
237 struct cfq_data *cfqd)
238{
239 if (prio == IDLE_WORKLOAD)
240 return &cfqd->service_tree_idle;
241
718eee05 242 return &cfqd->service_trees[prio][type];
c0324a02
CZ
243}
244
3b18152c 245enum cfqq_state_flags {
b0b8d749
JA
246 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
247 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
b029195d 248 CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
b0b8d749 249 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
b0b8d749
JA
250 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
251 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
252 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
44f7c160 253 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
91fac317 254 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
b3b6d040 255 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
4b27e1bb 256 CFQ_CFQQ_FLAG_coop_preempt, /* coop preempt */
3b18152c
JA
257};
258
259#define CFQ_CFQQ_FNS(name) \
260static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
261{ \
fe094d98 262 (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
3b18152c
JA
263} \
264static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
265{ \
fe094d98 266 (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
3b18152c
JA
267} \
268static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
269{ \
fe094d98 270 return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
3b18152c
JA
271}
272
273CFQ_CFQQ_FNS(on_rr);
274CFQ_CFQQ_FNS(wait_request);
b029195d 275CFQ_CFQQ_FNS(must_dispatch);
3b18152c 276CFQ_CFQQ_FNS(must_alloc_slice);
3b18152c
JA
277CFQ_CFQQ_FNS(fifo_expire);
278CFQ_CFQQ_FNS(idle_window);
279CFQ_CFQQ_FNS(prio_changed);
44f7c160 280CFQ_CFQQ_FNS(slice_new);
91fac317 281CFQ_CFQQ_FNS(sync);
a36e71f9 282CFQ_CFQQ_FNS(coop);
4b27e1bb 283CFQ_CFQQ_FNS(coop_preempt);
3b18152c
JA
284#undef CFQ_CFQQ_FNS
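/*
 * Each CFQ_CFQQ_FNS(name) line above expands to three helpers: e.g.
 * CFQ_CFQQ_FNS(on_rr) generates cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which set, clear and
 * test the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */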
285
7b679138
JA
286#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
287 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
288#define cfq_log(cfqd, fmt, args...) \
289 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
290
c0324a02
CZ
291static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
292{
293 if (cfq_class_idle(cfqq))
294 return IDLE_WORKLOAD;
295 if (cfq_class_rt(cfqq))
296 return RT_WORKLOAD;
297 return BE_WORKLOAD;
298}
299
718eee05
CZ
300
301static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
302{
303 if (!cfq_cfqq_sync(cfqq))
304 return ASYNC_WORKLOAD;
305 if (!cfq_cfqq_idle_window(cfqq))
306 return SYNC_NOIDLE_WORKLOAD;
307 return SYNC_WORKLOAD;
308}
309
c0324a02
CZ
310static inline int cfq_busy_queues_wl(enum wl_prio_t wl, struct cfq_data *cfqd)
311{
312 if (wl == IDLE_WORKLOAD)
313 return cfqd->service_tree_idle.count;
314
718eee05
CZ
315 return cfqd->service_trees[wl][ASYNC_WORKLOAD].count
316 + cfqd->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
317 + cfqd->service_trees[wl][SYNC_WORKLOAD].count;
c0324a02
CZ
318}
319
165125e1 320static void cfq_dispatch_insert(struct request_queue *, struct request *);
a6151c3a 321static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
fd0928df 322 struct io_context *, gfp_t);
4ac845a2 323static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
91fac317
VT
324 struct io_context *);
325
5ad531db
JA
326static inline int rq_in_driver(struct cfq_data *cfqd)
327{
328 return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
329}
330
91fac317 331static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
a6151c3a 332 bool is_sync)
91fac317 333{
a6151c3a 334 return cic->cfqq[is_sync];
91fac317
VT
335}
336
337static inline void cic_set_cfqq(struct cfq_io_context *cic,
a6151c3a 338 struct cfq_queue *cfqq, bool is_sync)
91fac317 339{
a6151c3a 340 cic->cfqq[is_sync] = cfqq;
91fac317
VT
341}
342
343/*
344 * We regard a request as SYNC if it's either a read or has the SYNC bit
345 * set (in which case it could also be a direct WRITE).
346 */
a6151c3a 347static inline bool cfq_bio_sync(struct bio *bio)
91fac317 348{
a6151c3a 349 return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
91fac317 350}
1da177e4 351
99f95e52
AM
352/*
353 * schedule a run of the queue if there are requests pending and nothing in
354 * the driver that will restart queueing
355 */
23e018a1 356static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
99f95e52 357{
7b679138
JA
358 if (cfqd->busy_queues) {
359 cfq_log(cfqd, "schedule dispatch");
23e018a1 360 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
7b679138 361 }
99f95e52
AM
362}
363
165125e1 364static int cfq_queue_empty(struct request_queue *q)
99f95e52
AM
365{
366 struct cfq_data *cfqd = q->elevator->elevator_data;
367
b4878f24 368 return !cfqd->busy_queues;
99f95e52
AM
369}
370
44f7c160
JA
371/*
372 * Scale schedule slice based on io priority. Use the sync time slice only
373 * if a queue is marked sync and has sync io queued. A sync queue with async
374 * io only should not get the full sync slice length.
375 */
a6151c3a 376static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
d9e7620e 377 unsigned short prio)
44f7c160 378{
d9e7620e 379 const int base_slice = cfqd->cfq_slice[sync];
44f7c160 380
d9e7620e
JA
381 WARN_ON(prio >= IOPRIO_BE_NR);
382
383 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
384}
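/*
 * Worked example, assuming HZ == 1000 so the default sync base slice
 * (HZ / 10) is 100ms: with CFQ_SLICE_SCALE == 5, an ioprio 0 queue
 * gets 100 + (100 / 5) * 4 = 180ms, ioprio 4 gets 100ms and ioprio 7
 * gets 100 + (100 / 5) * -3 = 40ms.
 */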
44f7c160 385
d9e7620e
JA
386static inline int
387cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
388{
389 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
44f7c160
JA
390}
391
5db5d642
CZ
392/*
393 * get the averaged number of queues of RT/BE priority.
394 * The average is updated with a formula that gives more weight to higher
395 * numbers, so it follows sudden increases quickly and decreases slowly.
396 */
397
5869619c
JA
398static inline unsigned cfq_get_avg_queues(struct cfq_data *cfqd, bool rt)
399{
5db5d642
CZ
400 unsigned min_q, max_q;
401 unsigned mult = cfq_hist_divisor - 1;
402 unsigned round = cfq_hist_divisor / 2;
c0324a02 403 unsigned busy = cfq_busy_queues_wl(rt, cfqd);
5db5d642
CZ
404
405 min_q = min(cfqd->busy_queues_avg[rt], busy);
406 max_q = max(cfqd->busy_queues_avg[rt], busy);
407 cfqd->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
408 cfq_hist_divisor;
409 return cfqd->busy_queues_avg[rt];
410}
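/*
 * With cfq_hist_divisor == 4 the average follows increases quickly
 * (3/4 of the weight goes to the larger of the old average and the
 * current busy count) and decays slowly. Example: an average of 1
 * with busy jumping to 9 gives (3 * 9 + 1 + 2) / 4 = 7; once busy
 * drops back to 1 the average only steps down gradually: 6, 5, 4, ...
 * on successive updates.
 */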
411
44f7c160
JA
412static inline void
413cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
414{
5db5d642
CZ
415 unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
416 if (cfqd->cfq_latency) {
417 /* interested queues (we consider only the ones with the same
418 * priority class) */
419 unsigned iq = cfq_get_avg_queues(cfqd, cfq_class_rt(cfqq));
420 unsigned sync_slice = cfqd->cfq_slice[1];
421 unsigned expect_latency = sync_slice * iq;
422 if (expect_latency > cfq_target_latency) {
423 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
424 /* scale low_slice according to IO priority
425 * and sync vs async */
426 unsigned low_slice =
427 min(slice, base_low_slice * slice / sync_slice);
428 /* the adapted slice value is scaled to fit all iqs
429 * into the target latency */
430 slice = max(slice * cfq_target_latency / expect_latency,
431 low_slice);
432 }
433 }
434 cfqq->slice_end = jiffies + slice;
7b679138 435 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
44f7c160
JA
436}
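/*
 * Example of the scaling above, assuming HZ == 1000 and the defaults:
 * with a 300ms target latency and 6 interested queues at the 100ms
 * sync slice, expect_latency is 600ms, so each slice is shrunk to
 * about 100 * 300 / 600 = 50ms (but never below low_slice).
 */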
437
438/*
439 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
440 * isn't valid until the first request from the dispatch is activated
441 * and the slice time set.
442 */
a6151c3a 443static inline bool cfq_slice_used(struct cfq_queue *cfqq)
44f7c160
JA
444{
445 if (cfq_cfqq_slice_new(cfqq))
446 return 0;
447 if (time_before(jiffies, cfqq->slice_end))
448 return 0;
449
450 return 1;
451}
452
1da177e4 453/*
5e705374 454 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
1da177e4 455 * We choose the request that is closest to the head right now. Distance
e8a99053 456 * behind the head is penalized and only allowed to a certain extent.
1da177e4 457 */
5e705374
JA
458static struct request *
459cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
1da177e4
LT
460{
461 sector_t last, s1, s2, d1 = 0, d2 = 0;
1da177e4 462 unsigned long back_max;
e8a99053
AM
463#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
464#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
465 unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1da177e4 466
5e705374
JA
467 if (rq1 == NULL || rq1 == rq2)
468 return rq2;
469 if (rq2 == NULL)
470 return rq1;
9c2c38a1 471
5e705374
JA
472 if (rq_is_sync(rq1) && !rq_is_sync(rq2))
473 return rq1;
474 else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
475 return rq2;
374f84ac
JA
476 if (rq_is_meta(rq1) && !rq_is_meta(rq2))
477 return rq1;
478 else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
479 return rq2;
1da177e4 480
83096ebf
TH
481 s1 = blk_rq_pos(rq1);
482 s2 = blk_rq_pos(rq2);
1da177e4 483
6d048f53 484 last = cfqd->last_position;
1da177e4 485
1da177e4
LT
486 /*
487 * by definition, 1KiB is 2 sectors
488 */
489 back_max = cfqd->cfq_back_max * 2;
490
491 /*
492 * Strict one way elevator _except_ in the case where we allow
493 * short backward seeks which are biased as twice the cost of a
494 * similar forward seek.
495 */
496 if (s1 >= last)
497 d1 = s1 - last;
498 else if (s1 + back_max >= last)
499 d1 = (last - s1) * cfqd->cfq_back_penalty;
500 else
e8a99053 501 wrap |= CFQ_RQ1_WRAP;
1da177e4
LT
502
503 if (s2 >= last)
504 d2 = s2 - last;
505 else if (s2 + back_max >= last)
506 d2 = (last - s2) * cfqd->cfq_back_penalty;
507 else
e8a99053 508 wrap |= CFQ_RQ2_WRAP;
1da177e4
LT
509
510 /* Found required data */
e8a99053
AM
511
512 /*
513 * By doing switch() on the bit mask "wrap" we avoid having to
514 * check two variables for all permutations: --> faster!
515 */
516 switch (wrap) {
5e705374 517 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
e8a99053 518 if (d1 < d2)
5e705374 519 return rq1;
e8a99053 520 else if (d2 < d1)
5e705374 521 return rq2;
e8a99053
AM
522 else {
523 if (s1 >= s2)
5e705374 524 return rq1;
e8a99053 525 else
5e705374 526 return rq2;
e8a99053 527 }
1da177e4 528
e8a99053 529 case CFQ_RQ2_WRAP:
5e705374 530 return rq1;
e8a99053 531 case CFQ_RQ1_WRAP:
5e705374
JA
532 return rq2;
533 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
e8a99053
AM
534 default:
535 /*
536 * Since both rqs are wrapped,
537 * start with the one that's further behind head
538 * (--> only *one* back seek required),
539 * since back seek takes more time than forward.
540 */
541 if (s1 <= s2)
5e705374 542 return rq1;
1da177e4 543 else
5e705374 544 return rq2;
1da177e4
LT
545 }
546}
547
498d3aa2
JA
548/*
549 * The below is the leftmost-cache rbtree addon
550 */
0871714e 551static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
cc09e299
JA
552{
553 if (!root->left)
554 root->left = rb_first(&root->rb);
555
0871714e
JA
556 if (root->left)
557 return rb_entry(root->left, struct cfq_queue, rb_node);
558
559 return NULL;
cc09e299
JA
560}
561
a36e71f9
JA
562static void rb_erase_init(struct rb_node *n, struct rb_root *root)
563{
564 rb_erase(n, root);
565 RB_CLEAR_NODE(n);
566}
567
cc09e299
JA
568static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
569{
570 if (root->left == n)
571 root->left = NULL;
a36e71f9 572 rb_erase_init(n, &root->rb);
aa6f6a3d 573 --root->count;
cc09e299
JA
574}
575
1da177e4
LT
576/*
577 * would be nice to take fifo expire time into account as well
578 */
5e705374
JA
579static struct request *
580cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
581 struct request *last)
1da177e4 582{
21183b07
JA
583 struct rb_node *rbnext = rb_next(&last->rb_node);
584 struct rb_node *rbprev = rb_prev(&last->rb_node);
5e705374 585 struct request *next = NULL, *prev = NULL;
1da177e4 586
21183b07 587 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1da177e4
LT
588
589 if (rbprev)
5e705374 590 prev = rb_entry_rq(rbprev);
1da177e4 591
21183b07 592 if (rbnext)
5e705374 593 next = rb_entry_rq(rbnext);
21183b07
JA
594 else {
595 rbnext = rb_first(&cfqq->sort_list);
596 if (rbnext && rbnext != &last->rb_node)
5e705374 597 next = rb_entry_rq(rbnext);
21183b07 598 }
1da177e4 599
21183b07 600 return cfq_choose_req(cfqd, next, prev);
1da177e4
LT
601}
602
d9e7620e
JA
603static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
604 struct cfq_queue *cfqq)
1da177e4 605{
d9e7620e
JA
606 /*
607 * just an approximation, should be ok.
608 */
67e6b49e
JA
609 return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
610 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
d9e7620e
JA
611}
612
498d3aa2 613/*
c0324a02 614 * The cfqd->service_trees hold all pending cfq_queue's that have
498d3aa2
JA
615 * requests waiting to be processed. It is sorted in the order that
616 * we will service the queues.
617 */
a36e71f9 618static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 619 bool add_front)
d9e7620e 620{
0871714e
JA
621 struct rb_node **p, *parent;
622 struct cfq_queue *__cfqq;
d9e7620e 623 unsigned long rb_key;
c0324a02 624 struct cfq_rb_root *service_tree;
498d3aa2 625 int left;
d9e7620e 626
718eee05 627 service_tree = service_tree_for(cfqq_prio(cfqq), cfqq_type(cfqq), cfqd);
0871714e
JA
628 if (cfq_class_idle(cfqq)) {
629 rb_key = CFQ_IDLE_DELAY;
aa6f6a3d 630 parent = rb_last(&service_tree->rb);
0871714e
JA
631 if (parent && parent != &cfqq->rb_node) {
632 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
633 rb_key += __cfqq->rb_key;
634 } else
635 rb_key += jiffies;
636 } else if (!add_front) {
b9c8946b
JA
637 /*
638 * Get our rb key offset. Subtract any residual slice
639 * value carried from last service. A negative resid
640 * count indicates slice overrun, and this should position
641 * the next service time further away in the tree.
642 */
edd75ffd 643 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
b9c8946b 644 rb_key -= cfqq->slice_resid;
edd75ffd 645 cfqq->slice_resid = 0;
48e025e6
CZ
646 } else {
647 rb_key = -HZ;
aa6f6a3d 648 __cfqq = cfq_rb_first(service_tree);
48e025e6
CZ
649 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
650 }
1da177e4 651
d9e7620e 652 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
99f9628a 653 /*
d9e7620e 654 * same position, nothing more to do
99f9628a 655 */
c0324a02
CZ
656 if (rb_key == cfqq->rb_key &&
657 cfqq->service_tree == service_tree)
d9e7620e 658 return;
1da177e4 659
aa6f6a3d
CZ
660 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
661 cfqq->service_tree = NULL;
1da177e4 662 }
d9e7620e 663
498d3aa2 664 left = 1;
0871714e 665 parent = NULL;
aa6f6a3d
CZ
666 cfqq->service_tree = service_tree;
667 p = &service_tree->rb.rb_node;
d9e7620e 668 while (*p) {
67060e37 669 struct rb_node **n;
cc09e299 670
d9e7620e
JA
671 parent = *p;
672 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
673
0c534e0a 674 /*
c0324a02 675 * sort by key, that represents service time.
0c534e0a 676 */
c0324a02 677 if (time_before(rb_key, __cfqq->rb_key))
67060e37 678 n = &(*p)->rb_left;
c0324a02 679 else {
67060e37 680 n = &(*p)->rb_right;
cc09e299 681 left = 0;
c0324a02 682 }
67060e37
JA
683
684 p = n;
d9e7620e
JA
685 }
686
cc09e299 687 if (left)
aa6f6a3d 688 service_tree->left = &cfqq->rb_node;
cc09e299 689
d9e7620e
JA
690 cfqq->rb_key = rb_key;
691 rb_link_node(&cfqq->rb_node, parent, p);
aa6f6a3d
CZ
692 rb_insert_color(&cfqq->rb_node, &service_tree->rb);
693 service_tree->count++;
1da177e4
LT
694}
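/*
 * The rb_key acts as a service deadline in jiffies: a normal queue is
 * keyed at jiffies plus a busy-queue-dependent offset minus any
 * residual slice, an add_front queue is keyed just before the current
 * leftmost entry, and an idle-class queue is pushed CFQ_IDLE_DELAY
 * beyond the last entry so it only runs when nothing else is pending.
 */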
695
a36e71f9 696static struct cfq_queue *
f2d1f0ae
JA
697cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
698 sector_t sector, struct rb_node **ret_parent,
699 struct rb_node ***rb_link)
a36e71f9 700{
a36e71f9
JA
701 struct rb_node **p, *parent;
702 struct cfq_queue *cfqq = NULL;
703
704 parent = NULL;
705 p = &root->rb_node;
706 while (*p) {
707 struct rb_node **n;
708
709 parent = *p;
710 cfqq = rb_entry(parent, struct cfq_queue, p_node);
711
712 /*
713 * Sort strictly based on sector. Smallest to the left,
714 * largest to the right.
715 */
2e46e8b2 716 if (sector > blk_rq_pos(cfqq->next_rq))
a36e71f9 717 n = &(*p)->rb_right;
2e46e8b2 718 else if (sector < blk_rq_pos(cfqq->next_rq))
a36e71f9
JA
719 n = &(*p)->rb_left;
720 else
721 break;
722 p = n;
3ac6c9f8 723 cfqq = NULL;
a36e71f9
JA
724 }
725
726 *ret_parent = parent;
727 if (rb_link)
728 *rb_link = p;
3ac6c9f8 729 return cfqq;
a36e71f9
JA
730}
731
732static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
733{
a36e71f9
JA
734 struct rb_node **p, *parent;
735 struct cfq_queue *__cfqq;
736
f2d1f0ae
JA
737 if (cfqq->p_root) {
738 rb_erase(&cfqq->p_node, cfqq->p_root);
739 cfqq->p_root = NULL;
740 }
a36e71f9
JA
741
742 if (cfq_class_idle(cfqq))
743 return;
744 if (!cfqq->next_rq)
745 return;
746
f2d1f0ae 747 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2e46e8b2
TH
748 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
749 blk_rq_pos(cfqq->next_rq), &parent, &p);
3ac6c9f8
JA
750 if (!__cfqq) {
751 rb_link_node(&cfqq->p_node, parent, p);
f2d1f0ae
JA
752 rb_insert_color(&cfqq->p_node, cfqq->p_root);
753 } else
754 cfqq->p_root = NULL;
a36e71f9
JA
755}
756
498d3aa2
JA
757/*
758 * Update cfqq's position in the service tree.
759 */
edd75ffd 760static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
6d048f53 761{
6d048f53
JA
762 /*
763 * Resorting requires the cfqq to be on the RR list already.
764 */
a36e71f9 765 if (cfq_cfqq_on_rr(cfqq)) {
edd75ffd 766 cfq_service_tree_add(cfqd, cfqq, 0);
a36e71f9
JA
767 cfq_prio_tree_add(cfqd, cfqq);
768 }
6d048f53
JA
769}
770
1da177e4
LT
771/*
772 * add to busy list of queues for service, trying to be fair in ordering
22e2c507 773 * the pending list according to last request service
1da177e4 774 */
febffd61 775static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 776{
7b679138 777 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
3b18152c
JA
778 BUG_ON(cfq_cfqq_on_rr(cfqq));
779 cfq_mark_cfqq_on_rr(cfqq);
1da177e4
LT
780 cfqd->busy_queues++;
781
edd75ffd 782 cfq_resort_rr_list(cfqd, cfqq);
1da177e4
LT
783}
784
498d3aa2
JA
785/*
786 * Called when the cfqq no longer has requests pending, remove it from
787 * the service tree.
788 */
febffd61 789static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 790{
7b679138 791 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
3b18152c
JA
792 BUG_ON(!cfq_cfqq_on_rr(cfqq));
793 cfq_clear_cfqq_on_rr(cfqq);
1da177e4 794
aa6f6a3d
CZ
795 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
796 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
797 cfqq->service_tree = NULL;
798 }
f2d1f0ae
JA
799 if (cfqq->p_root) {
800 rb_erase(&cfqq->p_node, cfqq->p_root);
801 cfqq->p_root = NULL;
802 }
d9e7620e 803
1da177e4
LT
804 BUG_ON(!cfqd->busy_queues);
805 cfqd->busy_queues--;
806}
807
808/*
809 * rb tree support functions
810 */
febffd61 811static void cfq_del_rq_rb(struct request *rq)
1da177e4 812{
5e705374 813 struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f24 814 struct cfq_data *cfqd = cfqq->cfqd;
5e705374 815 const int sync = rq_is_sync(rq);
1da177e4 816
b4878f24
JA
817 BUG_ON(!cfqq->queued[sync]);
818 cfqq->queued[sync]--;
1da177e4 819
5e705374 820 elv_rb_del(&cfqq->sort_list, rq);
1da177e4 821
dd67d051 822 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
b4878f24 823 cfq_del_cfqq_rr(cfqd, cfqq);
1da177e4
LT
824}
825
5e705374 826static void cfq_add_rq_rb(struct request *rq)
1da177e4 827{
5e705374 828 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 829 struct cfq_data *cfqd = cfqq->cfqd;
a36e71f9 830 struct request *__alias, *prev;
1da177e4 831
5380a101 832 cfqq->queued[rq_is_sync(rq)]++;
1da177e4
LT
833
834 /*
835 * looks a little odd, but the first insert might return an alias.
836 * if that happens, put the alias on the dispatch list
837 */
21183b07 838 while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
5e705374 839 cfq_dispatch_insert(cfqd->queue, __alias);
5fccbf61
JA
840
841 if (!cfq_cfqq_on_rr(cfqq))
842 cfq_add_cfqq_rr(cfqd, cfqq);
5044eed4
JA
843
844 /*
845 * check if this request is a better next-serve candidate
846 */
a36e71f9 847 prev = cfqq->next_rq;
5044eed4 848 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
a36e71f9
JA
849
850 /*
851 * adjust priority tree position, if ->next_rq changes
852 */
853 if (prev != cfqq->next_rq)
854 cfq_prio_tree_add(cfqd, cfqq);
855
5044eed4 856 BUG_ON(!cfqq->next_rq);
1da177e4
LT
857}
858
febffd61 859static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1da177e4 860{
5380a101
JA
861 elv_rb_del(&cfqq->sort_list, rq);
862 cfqq->queued[rq_is_sync(rq)]--;
5e705374 863 cfq_add_rq_rb(rq);
1da177e4
LT
864}
865
206dc69b
JA
866static struct request *
867cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1da177e4 868{
206dc69b 869 struct task_struct *tsk = current;
91fac317 870 struct cfq_io_context *cic;
206dc69b 871 struct cfq_queue *cfqq;
1da177e4 872
4ac845a2 873 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
874 if (!cic)
875 return NULL;
876
877 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
89850f7e
JA
878 if (cfqq) {
879 sector_t sector = bio->bi_sector + bio_sectors(bio);
880
21183b07 881 return elv_rb_find(&cfqq->sort_list, sector);
89850f7e 882 }
1da177e4 883
1da177e4
LT
884 return NULL;
885}
886
165125e1 887static void cfq_activate_request(struct request_queue *q, struct request *rq)
1da177e4 888{
22e2c507 889 struct cfq_data *cfqd = q->elevator->elevator_data;
3b18152c 890
5ad531db 891 cfqd->rq_in_driver[rq_is_sync(rq)]++;
7b679138 892 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
5ad531db 893 rq_in_driver(cfqd));
25776e35 894
5b93629b 895 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1da177e4
LT
896}
897
165125e1 898static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1da177e4 899{
b4878f24 900 struct cfq_data *cfqd = q->elevator->elevator_data;
5ad531db 901 const int sync = rq_is_sync(rq);
b4878f24 902
5ad531db
JA
903 WARN_ON(!cfqd->rq_in_driver[sync]);
904 cfqd->rq_in_driver[sync]--;
7b679138 905 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
5ad531db 906 rq_in_driver(cfqd));
1da177e4
LT
907}
908
b4878f24 909static void cfq_remove_request(struct request *rq)
1da177e4 910{
5e705374 911 struct cfq_queue *cfqq = RQ_CFQQ(rq);
21183b07 912
5e705374
JA
913 if (cfqq->next_rq == rq)
914 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1da177e4 915
b4878f24 916 list_del_init(&rq->queuelist);
5e705374 917 cfq_del_rq_rb(rq);
374f84ac 918
45333d5a 919 cfqq->cfqd->rq_queued--;
374f84ac
JA
920 if (rq_is_meta(rq)) {
921 WARN_ON(!cfqq->meta_pending);
922 cfqq->meta_pending--;
923 }
1da177e4
LT
924}
925
165125e1
JA
926static int cfq_merge(struct request_queue *q, struct request **req,
927 struct bio *bio)
1da177e4
LT
928{
929 struct cfq_data *cfqd = q->elevator->elevator_data;
930 struct request *__rq;
1da177e4 931
206dc69b 932 __rq = cfq_find_rq_fmerge(cfqd, bio);
22e2c507 933 if (__rq && elv_rq_merge_ok(__rq, bio)) {
9817064b
JA
934 *req = __rq;
935 return ELEVATOR_FRONT_MERGE;
1da177e4
LT
936 }
937
938 return ELEVATOR_NO_MERGE;
1da177e4
LT
939}
940
165125e1 941static void cfq_merged_request(struct request_queue *q, struct request *req,
21183b07 942 int type)
1da177e4 943{
21183b07 944 if (type == ELEVATOR_FRONT_MERGE) {
5e705374 945 struct cfq_queue *cfqq = RQ_CFQQ(req);
1da177e4 946
5e705374 947 cfq_reposition_rq_rb(cfqq, req);
1da177e4 948 }
1da177e4
LT
949}
950
951static void
165125e1 952cfq_merged_requests(struct request_queue *q, struct request *rq,
1da177e4
LT
953 struct request *next)
954{
22e2c507
JA
955 /*
956 * reposition in fifo if next is older than rq
957 */
958 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
30996f40 959 time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
22e2c507 960 list_move(&rq->queuelist, &next->queuelist);
30996f40
JA
961 rq_set_fifo_time(rq, rq_fifo_time(next));
962 }
22e2c507 963
b4878f24 964 cfq_remove_request(next);
22e2c507
JA
965}
966
165125e1 967static int cfq_allow_merge(struct request_queue *q, struct request *rq,
da775265
JA
968 struct bio *bio)
969{
970 struct cfq_data *cfqd = q->elevator->elevator_data;
91fac317 971 struct cfq_io_context *cic;
da775265 972 struct cfq_queue *cfqq;
da775265
JA
973
974 /*
ec8acb69 975 * Disallow merge of a sync bio into an async request.
da775265 976 */
91fac317 977 if (cfq_bio_sync(bio) && !rq_is_sync(rq))
a6151c3a 978 return false;
da775265
JA
979
980 /*
719d3402
JA
981 * Lookup the cfqq that this bio will be queued with. Allow
982 * merge only if rq is queued there.
da775265 983 */
4ac845a2 984 cic = cfq_cic_lookup(cfqd, current->io_context);
91fac317 985 if (!cic)
a6151c3a 986 return false;
719d3402 987
91fac317 988 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
a6151c3a 989 return cfqq == RQ_CFQQ(rq);
da775265
JA
990}
991
febffd61
JA
992static void __cfq_set_active_queue(struct cfq_data *cfqd,
993 struct cfq_queue *cfqq)
22e2c507
JA
994{
995 if (cfqq) {
7b679138 996 cfq_log_cfqq(cfqd, cfqq, "set_active");
22e2c507 997 cfqq->slice_end = 0;
2f5cb738
JA
998 cfqq->slice_dispatch = 0;
999
2f5cb738 1000 cfq_clear_cfqq_wait_request(cfqq);
b029195d 1001 cfq_clear_cfqq_must_dispatch(cfqq);
3b18152c
JA
1002 cfq_clear_cfqq_must_alloc_slice(cfqq);
1003 cfq_clear_cfqq_fifo_expire(cfqq);
44f7c160 1004 cfq_mark_cfqq_slice_new(cfqq);
2f5cb738
JA
1005
1006 del_timer(&cfqd->idle_slice_timer);
22e2c507
JA
1007 }
1008
1009 cfqd->active_queue = cfqq;
1010}
1011
7b14e3b5
JA
1012/*
1013 * current cfqq expired its slice (or was too idle), select new one
1014 */
1015static void
1016__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 1017 bool timed_out)
7b14e3b5 1018{
7b679138
JA
1019 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1020
7b14e3b5
JA
1021 if (cfq_cfqq_wait_request(cfqq))
1022 del_timer(&cfqd->idle_slice_timer);
1023
7b14e3b5
JA
1024 cfq_clear_cfqq_wait_request(cfqq);
1025
1026 /*
6084cdda 1027 * store what was left of this slice, if the queue idled/timed out
7b14e3b5 1028 */
7b679138 1029 if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
c5b680f3 1030 cfqq->slice_resid = cfqq->slice_end - jiffies;
7b679138
JA
1031 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1032 }
7b14e3b5 1033
edd75ffd 1034 cfq_resort_rr_list(cfqd, cfqq);
7b14e3b5
JA
1035
1036 if (cfqq == cfqd->active_queue)
1037 cfqd->active_queue = NULL;
1038
1039 if (cfqd->active_cic) {
1040 put_io_context(cfqd->active_cic->ioc);
1041 cfqd->active_cic = NULL;
1042 }
7b14e3b5
JA
1043}
1044
a6151c3a 1045static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
7b14e3b5
JA
1046{
1047 struct cfq_queue *cfqq = cfqd->active_queue;
1048
1049 if (cfqq)
6084cdda 1050 __cfq_slice_expired(cfqd, cfqq, timed_out);
7b14e3b5
JA
1051}
1052
498d3aa2
JA
1053/*
1054 * Get next queue for service. Unless we have a queue preemption,
1055 * we'll simply select the first cfqq in the service tree.
1056 */
6d048f53 1057static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
22e2c507 1058{
c0324a02 1059 struct cfq_rb_root *service_tree =
718eee05 1060 service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd);
d9e7620e 1061
c0324a02
CZ
1062 if (RB_EMPTY_ROOT(&service_tree->rb))
1063 return NULL;
1064 return cfq_rb_first(service_tree);
6d048f53
JA
1065}
1066
498d3aa2
JA
1067/*
1068 * Get and set a new active queue for service.
1069 */
a36e71f9
JA
1070static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1071 struct cfq_queue *cfqq)
6d048f53 1072{
a36e71f9
JA
1073 if (!cfqq) {
1074 cfqq = cfq_get_next_queue(cfqd);
6d048f53 1075
4b27e1bb 1076 if (cfqq && !cfq_cfqq_coop_preempt(cfqq))
a36e71f9
JA
1077 cfq_clear_cfqq_coop(cfqq);
1078 }
6d048f53 1079
4b27e1bb
SL
1080 if (cfqq)
1081 cfq_clear_cfqq_coop_preempt(cfqq);
1082
22e2c507 1083 __cfq_set_active_queue(cfqd, cfqq);
3b18152c 1084 return cfqq;
22e2c507
JA
1085}
1086
d9e7620e
JA
1087static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1088 struct request *rq)
1089{
83096ebf
TH
1090 if (blk_rq_pos(rq) >= cfqd->last_position)
1091 return blk_rq_pos(rq) - cfqd->last_position;
d9e7620e 1092 else
83096ebf 1093 return cfqd->last_position - blk_rq_pos(rq);
d9e7620e
JA
1094}
1095
b2c18e1e
JM
1096#define CFQQ_SEEK_THR 8 * 1024
1097#define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
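/*
 * seek_mean is kept in 512-byte sectors, so the 8 * 1024 sector
 * threshold treats a queue as seeky once its mean seek distance
 * exceeds about 4MB.
 */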
04dc6e71 1098
b2c18e1e
JM
1099static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1100 struct request *rq)
6d048f53 1101{
b2c18e1e 1102 sector_t sdist = cfqq->seek_mean;
6d048f53 1103
b2c18e1e
JM
1104 if (!sample_valid(cfqq->seek_samples))
1105 sdist = CFQQ_SEEK_THR;
6d048f53 1106
04dc6e71 1107 return cfq_dist_from_last(cfqd, rq) <= sdist;
6d048f53
JA
1108}
1109
a36e71f9
JA
1110static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1111 struct cfq_queue *cur_cfqq)
1112{
f2d1f0ae 1113 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
a36e71f9
JA
1114 struct rb_node *parent, *node;
1115 struct cfq_queue *__cfqq;
1116 sector_t sector = cfqd->last_position;
1117
1118 if (RB_EMPTY_ROOT(root))
1119 return NULL;
1120
1121 /*
1122 * First, if we find a request starting at the end of the last
1123 * request, choose it.
1124 */
f2d1f0ae 1125 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
a36e71f9
JA
1126 if (__cfqq)
1127 return __cfqq;
1128
1129 /*
1130 * If the exact sector wasn't found, the parent of the NULL leaf
1131 * will contain the closest sector.
1132 */
1133 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
b2c18e1e 1134 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
1135 return __cfqq;
1136
2e46e8b2 1137 if (blk_rq_pos(__cfqq->next_rq) < sector)
a36e71f9
JA
1138 node = rb_next(&__cfqq->p_node);
1139 else
1140 node = rb_prev(&__cfqq->p_node);
1141 if (!node)
1142 return NULL;
1143
1144 __cfqq = rb_entry(node, struct cfq_queue, p_node);
b2c18e1e 1145 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
1146 return __cfqq;
1147
1148 return NULL;
1149}
1150
1151/*
1152 * cfqd - obvious
1153 * cur_cfqq - passed in so that we don't decide that the current queue is
1154 * closely cooperating with itself.
1155 *
1156 * So, basically we're assuming that cur_cfqq has dispatched at least
1157 * one request, and that cfqd->last_position reflects a position on the disk
1158 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
1159 * assumption.
1160 */
1161static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
b3b6d040 1162 struct cfq_queue *cur_cfqq)
6d048f53 1163{
a36e71f9
JA
1164 struct cfq_queue *cfqq;
1165
e6c5bc73
JM
1166 if (!cfq_cfqq_sync(cur_cfqq))
1167 return NULL;
1168 if (CFQQ_SEEKY(cur_cfqq))
1169 return NULL;
1170
6d048f53 1171 /*
d9e7620e
JA
1172 * We should notice if some of the queues are cooperating, eg
1173 * working closely on the same area of the disk. In that case,
1174 * we can group them together and don't waste time idling.
6d048f53 1175 */
a36e71f9
JA
1176 cfqq = cfqq_close(cfqd, cur_cfqq);
1177 if (!cfqq)
1178 return NULL;
1179
df5fe3e8
JM
1180 /*
1181 * It only makes sense to merge sync queues.
1182 */
1183 if (!cfq_cfqq_sync(cfqq))
1184 return NULL;
e6c5bc73
JM
1185 if (CFQQ_SEEKY(cfqq))
1186 return NULL;
df5fe3e8 1187
c0324a02
CZ
1188 /*
1189 * Do not merge queues of different priority classes
1190 */
1191 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1192 return NULL;
1193
a36e71f9 1194 return cfqq;
6d048f53
JA
1195}
1196
a6d44e98
CZ
1197/*
1198 * Determine whether we should enforce idle window for this queue.
1199 */
1200
1201static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1202{
1203 enum wl_prio_t prio = cfqq_prio(cfqq);
718eee05 1204 struct cfq_rb_root *service_tree = cfqq->service_tree;
a6d44e98
CZ
1205
1206 /* We never do for idle class queues. */
1207 if (prio == IDLE_WORKLOAD)
1208 return false;
1209
1210 /* We do for queues that were marked with idle window flag. */
1211 if (cfq_cfqq_idle_window(cfqq))
1212 return true;
1213
1214 /*
1215 * Otherwise, we do only if they are the last ones
1216 * in their service tree.
1217 */
718eee05
CZ
1218 if (!service_tree)
1219 service_tree = service_tree_for(prio, cfqq_type(cfqq), cfqd);
1220
a6d44e98
CZ
1221 if (service_tree->count == 0)
1222 return true;
1223
1224 return (service_tree->count == 1 && cfq_rb_first(service_tree) == cfqq);
1225}
1226
6d048f53 1227static void cfq_arm_slice_timer(struct cfq_data *cfqd)
22e2c507 1228{
1792669c 1229 struct cfq_queue *cfqq = cfqd->active_queue;
206dc69b 1230 struct cfq_io_context *cic;
7b14e3b5
JA
1231 unsigned long sl;
1232
a68bbddb 1233 /*
f7d7b7a7
JA
1234 * SSD device without seek penalty, disable idling. But only do so
1235 * for devices that support queuing, otherwise we still have a problem
1236 * with sync vs async workloads.
a68bbddb 1237 */
f7d7b7a7 1238 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
a68bbddb
JA
1239 return;
1240
dd67d051 1241 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
6d048f53 1242 WARN_ON(cfq_cfqq_slice_new(cfqq));
22e2c507
JA
1243
1244 /*
1245 * idle is disabled, either manually or by past process history
1246 */
a6d44e98 1247 if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
6d048f53
JA
1248 return;
1249
7b679138
JA
1250 /*
1251 * still requests with the driver, don't idle
1252 */
5ad531db 1253 if (rq_in_driver(cfqd))
7b679138
JA
1254 return;
1255
22e2c507
JA
1256 /*
1257 * task has exited, don't wait
1258 */
206dc69b 1259 cic = cfqd->active_cic;
66dac98e 1260 if (!cic || !atomic_read(&cic->ioc->nr_tasks))
6d048f53
JA
1261 return;
1262
355b659c
CZ
1263 /*
1264 * If our average think time is larger than the remaining time
1265 * slice, then don't idle. This avoids overrunning the allotted
1266 * time slice.
1267 */
1268 if (sample_valid(cic->ttime_samples) &&
1269 (cfqq->slice_end - jiffies < cic->ttime_mean))
1270 return;
1271
3b18152c 1272 cfq_mark_cfqq_wait_request(cfqq);
22e2c507 1273
6d048f53 1274 sl = cfqd->cfq_slice_idle;
718eee05
CZ
1275 /* are we servicing noidle tree, and there are more queues?
1276 * non-rotational or NCQ: no idle
1277 * non-NCQ rotational : very small idle, to allow
1278 * fair distribution of slice time for a process doing back-to-back
1279 * seeks.
1280 */
1281 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
1282 service_tree_for(cfqd->serving_prio, SYNC_NOIDLE_WORKLOAD, cfqd)
1283 ->count > 0) {
1284 if (blk_queue_nonrot(cfqd->queue) || cfqd->hw_tag)
1285 return;
d9e7620e 1286 sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
718eee05 1287 }
206dc69b 1288
7b14e3b5 1289 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
9481ffdc 1290 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
1da177e4
LT
1291}
1292
498d3aa2
JA
1293/*
1294 * Move request from internal lists to the request queue dispatch list.
1295 */
165125e1 1296static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1da177e4 1297{
3ed9a296 1298 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 1299 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 1300
7b679138
JA
1301 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1302
06d21886 1303 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
5380a101 1304 cfq_remove_request(rq);
6d048f53 1305 cfqq->dispatched++;
5380a101 1306 elv_dispatch_sort(q, rq);
3ed9a296
JA
1307
1308 if (cfq_cfqq_sync(cfqq))
1309 cfqd->sync_flight++;
1da177e4
LT
1310}
1311
1312/*
1313 * return expired entry, or NULL to just start from scratch in rbtree
1314 */
febffd61 1315static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1da177e4 1316{
30996f40 1317 struct request *rq = NULL;
1da177e4 1318
3b18152c 1319 if (cfq_cfqq_fifo_expire(cfqq))
1da177e4 1320 return NULL;
cb887411
JA
1321
1322 cfq_mark_cfqq_fifo_expire(cfqq);
1323
89850f7e
JA
1324 if (list_empty(&cfqq->fifo))
1325 return NULL;
1da177e4 1326
89850f7e 1327 rq = rq_entry_fifo(cfqq->fifo.next);
30996f40 1328 if (time_before(jiffies, rq_fifo_time(rq)))
7b679138 1329 rq = NULL;
1da177e4 1330
30996f40 1331 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
6d048f53 1332 return rq;
1da177e4
LT
1333}
1334
22e2c507
JA
1335static inline int
1336cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1337{
1338 const int base_rq = cfqd->cfq_slice_async_rq;
1da177e4 1339
22e2c507 1340 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1da177e4 1341
22e2c507 1342 return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
1da177e4
LT
1343}
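/*
 * With the default cfq_slice_async_rq of 2 and CFQ_PRIO_LISTS == 8,
 * this allows an ioprio 0 queue 2 * (2 + 2 * 7) = 32 requests per
 * slice, down to 2 * (2 + 0) = 4 requests for ioprio 7.
 */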
1344
df5fe3e8
JM
1345/*
1346 * Must be called with the queue_lock held.
1347 */
1348static int cfqq_process_refs(struct cfq_queue *cfqq)
1349{
1350 int process_refs, io_refs;
1351
1352 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
1353 process_refs = atomic_read(&cfqq->ref) - io_refs;
1354 BUG_ON(process_refs < 0);
1355 return process_refs;
1356}
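/*
 * cfqq->ref also counts one reference per allocated request;
 * subtracting those leaves only the references held by processes,
 * which is what decides whether a queue merge still makes sense.
 */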
1357
1358static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1359{
e6c5bc73 1360 int process_refs, new_process_refs;
df5fe3e8
JM
1361 struct cfq_queue *__cfqq;
1362
1363 /* Avoid a circular list and skip interim queue merges */
1364 while ((__cfqq = new_cfqq->new_cfqq)) {
1365 if (__cfqq == cfqq)
1366 return;
1367 new_cfqq = __cfqq;
1368 }
1369
1370 process_refs = cfqq_process_refs(cfqq);
1371 /*
1372 * If the process for the cfqq has gone away, there is no
1373 * sense in merging the queues.
1374 */
1375 if (process_refs == 0)
1376 return;
1377
e6c5bc73
JM
1378 /*
1379 * Merge in the direction of the lesser amount of work.
1380 */
1381 new_process_refs = cfqq_process_refs(new_cfqq);
1382 if (new_process_refs >= process_refs) {
1383 cfqq->new_cfqq = new_cfqq;
1384 atomic_add(process_refs, &new_cfqq->ref);
1385 } else {
1386 new_cfqq->new_cfqq = cfqq;
1387 atomic_add(new_process_refs, &cfqq->ref);
1388 }
df5fe3e8
JM
1389}
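/*
 * Note that the merge is directional: the queue with fewer process
 * references is pointed at the busier one via ->new_cfqq, so the
 * smaller group of tasks migrates onto the queue doing more work.
 */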
1390
718eee05
CZ
1391static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd, enum wl_prio_t prio,
1392 bool prio_changed)
1393{
1394 struct cfq_queue *queue;
1395 int i;
1396 bool key_valid = false;
1397 unsigned long lowest_key = 0;
1398 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
1399
1400 if (prio_changed) {
1401 /*
1402 * When the serving priority has switched, we prefer starting
1403 * from SYNC_NOIDLE (first choice), or just SYNC
1404 * over ASYNC
1405 */
1406 if (service_tree_for(prio, cur_best, cfqd)->count)
1407 return cur_best;
1408 cur_best = SYNC_WORKLOAD;
1409 if (service_tree_for(prio, cur_best, cfqd)->count)
1410 return cur_best;
1411
1412 return ASYNC_WORKLOAD;
1413 }
1414
1415 for (i = 0; i < 3; ++i) {
1416 /* otherwise, select the one with lowest rb_key */
1417 queue = cfq_rb_first(service_tree_for(prio, i, cfqd));
1418 if (queue &&
1419 (!key_valid || time_before(queue->rb_key, lowest_key))) {
1420 lowest_key = queue->rb_key;
1421 cur_best = i;
1422 key_valid = true;
1423 }
1424 }
1425
1426 return cur_best;
1427}
1428
1429static void choose_service_tree(struct cfq_data *cfqd)
1430{
1431 enum wl_prio_t previous_prio = cfqd->serving_prio;
1432 bool prio_changed;
1433 unsigned slice;
1434 unsigned count;
1435
1436 /* Choose next priority. RT > BE > IDLE */
1437 if (cfq_busy_queues_wl(RT_WORKLOAD, cfqd))
1438 cfqd->serving_prio = RT_WORKLOAD;
1439 else if (cfq_busy_queues_wl(BE_WORKLOAD, cfqd))
1440 cfqd->serving_prio = BE_WORKLOAD;
1441 else {
1442 cfqd->serving_prio = IDLE_WORKLOAD;
1443 cfqd->workload_expires = jiffies + 1;
1444 return;
1445 }
1446
1447 /*
1448 * For RT and BE, we also have to choose the type
1449 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
1450 * expiration time
1451 */
1452 prio_changed = (cfqd->serving_prio != previous_prio);
1453 count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
1454 ->count;
1455
1456 /*
1457 * If priority didn't change, check workload expiration,
1458 * and that we still have other queues ready
1459 */
1460 if (!prio_changed && count &&
1461 !time_after(jiffies, cfqd->workload_expires))
1462 return;
1463
1464 /* otherwise select new workload type */
1465 cfqd->serving_type =
1466 cfq_choose_wl(cfqd, cfqd->serving_prio, prio_changed);
1467 count = service_tree_for(cfqd->serving_prio, cfqd->serving_type, cfqd)
1468 ->count;
1469
1470 /*
1471 * the workload slice is computed as a fraction of target latency
1472 * proportional to the number of queues in that workload, over
1473 * all the queues in the same priority class
1474 */
1475 slice = cfq_target_latency * count /
1476 max_t(unsigned, cfqd->busy_queues_avg[cfqd->serving_prio],
1477 cfq_busy_queues_wl(cfqd->serving_prio, cfqd));
1478
1479 if (cfqd->serving_type == ASYNC_WORKLOAD)
1480 /* async workload slice is scaled down according to
1481 * the sync/async slice ratio. */
1482 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
1483 else
1484 /* sync workload slice is at least 2 * cfq_slice_idle */
1485 slice = max(slice, 2 * cfqd->cfq_slice_idle);
1486
1487 slice = max_t(unsigned, slice, CFQ_MIN_TT);
1488 cfqd->workload_expires = jiffies + slice;
1489}
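/*
 * Example, assuming HZ == 1000 and the defaults: 2 queues on the
 * serving tree out of 4 busy queues in the class give a workload
 * slice of 300 * 2 / 4 = 150ms; an async workload with the same
 * counts gets 150 * 40 / 100 = 60ms instead.
 */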
1490
22e2c507 1491/*
498d3aa2
JA
1492 * Select a queue for service. If we have a current active queue,
1493 * check whether to continue servicing it, or retrieve and set a new one.
22e2c507 1494 */
1b5ed5e1 1495static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1da177e4 1496{
a36e71f9 1497 struct cfq_queue *cfqq, *new_cfqq = NULL;
1da177e4 1498
22e2c507
JA
1499 cfqq = cfqd->active_queue;
1500 if (!cfqq)
1501 goto new_queue;
1da177e4 1502
22e2c507 1503 /*
6d048f53 1504 * The active queue has run out of time, expire it and select new.
22e2c507 1505 */
b029195d 1506 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
3b18152c 1507 goto expire;
1da177e4 1508
22e2c507 1509 /*
6d048f53
JA
1510 * The active queue has requests and isn't expired, allow it to
1511 * dispatch.
22e2c507 1512 */
dd67d051 1513 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 1514 goto keep_queue;
6d048f53 1515
a36e71f9
JA
1516 /*
1517 * If another queue has a request waiting within our mean seek
1518 * distance, let it run. The expire code will check for close
1519 * cooperators and put the close queue at the front of the service
df5fe3e8 1520 * tree. If possible, merge the expiring queue with the new cfqq.
a36e71f9 1521 */
b3b6d040 1522 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
df5fe3e8
JM
1523 if (new_cfqq) {
1524 if (!cfqq->new_cfqq)
1525 cfq_setup_merge(cfqq, new_cfqq);
a36e71f9 1526 goto expire;
df5fe3e8 1527 }
a36e71f9 1528
6d048f53
JA
1529 /*
1530 * No requests pending. If the active queue still has requests in
1531 * flight or is idling for a new request, allow either of these
1532 * conditions to happen (or time out) before selecting a new queue.
1533 */
cc197479 1534 if (timer_pending(&cfqd->idle_slice_timer) ||
a6d44e98 1535 (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
caaa5f9f
JA
1536 cfqq = NULL;
1537 goto keep_queue;
22e2c507
JA
1538 }
1539
3b18152c 1540expire:
6084cdda 1541 cfq_slice_expired(cfqd, 0);
3b18152c 1542new_queue:
718eee05
CZ
1543 /*
1544 * Current queue expired. Check if we have to switch to a new
1545 * service tree
1546 */
1547 if (!new_cfqq)
1548 choose_service_tree(cfqd);
1549
a36e71f9 1550 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
22e2c507 1551keep_queue:
3b18152c 1552 return cfqq;
22e2c507
JA
1553}
1554
febffd61 1555static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
d9e7620e
JA
1556{
1557 int dispatched = 0;
1558
1559 while (cfqq->next_rq) {
1560 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
1561 dispatched++;
1562 }
1563
1564 BUG_ON(!list_empty(&cfqq->fifo));
1565 return dispatched;
1566}
1567
498d3aa2
JA
1568/*
1569 * Drain our current requests. Used for barriers and when switching
1570 * io schedulers on-the-fly.
1571 */
d9e7620e 1572static int cfq_forced_dispatch(struct cfq_data *cfqd)
1b5ed5e1 1573{
0871714e 1574 struct cfq_queue *cfqq;
d9e7620e 1575 int dispatched = 0;
718eee05 1576 int i, j;
c0324a02 1577 for (i = 0; i < 2; ++i)
718eee05
CZ
1578 for (j = 0; j < 3; ++j)
1579 while ((cfqq = cfq_rb_first(&cfqd->service_trees[i][j]))
1580 != NULL)
1581 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1b5ed5e1 1582
c0324a02 1583 while ((cfqq = cfq_rb_first(&cfqd->service_tree_idle)) != NULL)
d9e7620e 1584 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
1b5ed5e1 1585
6084cdda 1586 cfq_slice_expired(cfqd, 0);
1b5ed5e1
TH
1587
1588 BUG_ON(cfqd->busy_queues);
1589
6923715a 1590 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1b5ed5e1
TH
1591 return dispatched;
1592}
1593
0b182d61 1594static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2f5cb738 1595{
2f5cb738 1596 unsigned int max_dispatch;
22e2c507 1597
5ad531db
JA
1598 /*
1599 * Drain async requests before we start sync IO
1600 */
a6d44e98 1601 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
0b182d61 1602 return false;
5ad531db 1603
2f5cb738
JA
1604 /*
1605 * If this is an async queue and we have sync IO in flight, let it wait
1606 */
1607 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
0b182d61 1608 return false;
2f5cb738
JA
1609
1610 max_dispatch = cfqd->cfq_quantum;
1611 if (cfq_class_idle(cfqq))
1612 max_dispatch = 1;
b4878f24 1613
2f5cb738
JA
1614 /*
1615 * Does this cfqq already have too much IO in flight?
1616 */
1617 if (cfqq->dispatched >= max_dispatch) {
1618 /*
1619 * idle queue must always only have a single IO in flight
1620 */
3ed9a296 1621 if (cfq_class_idle(cfqq))
0b182d61 1622 return false;
3ed9a296 1623
2f5cb738
JA
1624 /*
1625 * We have other queues, don't allow more IO from this one
1626 */
1627 if (cfqd->busy_queues > 1)
0b182d61 1628 return false;
9ede209e 1629
365722bb 1630 /*
8e296755 1631 * Sole queue user, allow bigger slice
365722bb 1632 */
8e296755
JA
1633 max_dispatch *= 4;
1634 }
1635
1636 /*
1637 * Async queues must wait a bit before being allowed dispatch.
1638 * We also ramp up the dispatch depth gradually for async IO,
1639 * based on the last sync IO we serviced
1640 */
963b72fc 1641 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
8e296755
JA
1642 unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
1643 unsigned int depth;
365722bb 1644
61f0c1dc 1645 depth = last_sync / cfqd->cfq_slice[1];
e00c54c3
JA
1646 if (!depth && !cfqq->dispatched)
1647 depth = 1;
8e296755
JA
1648 if (depth < max_dispatch)
1649 max_dispatch = depth;
2f5cb738 1650 }
3ed9a296 1651
0b182d61
JA
1652 /*
1653 * If we're below the current max, allow a dispatch
1654 */
1655 return cfqq->dispatched < max_dispatch;
1656}
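/*
 * Example of the async ramp-up above, assuming HZ == 1000 and the
 * default 100ms sync slice: if the last sync request completed 250ms
 * ago, depth is 250 / 100 = 2, so an async queue is limited to two
 * requests in flight even if cfq_quantum would allow more.
 */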
1657
1658/*
1659 * Dispatch a request from cfqq, moving it to the request queue
1660 * dispatch list.
1661 */
1662static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1663{
1664 struct request *rq;
1665
1666 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
1667
1668 if (!cfq_may_dispatch(cfqd, cfqq))
1669 return false;
1670
1671 /*
1672 * follow expired path, else get first next available
1673 */
1674 rq = cfq_check_fifo(cfqq);
1675 if (!rq)
1676 rq = cfqq->next_rq;
1677
1678 /*
1679 * insert request into driver dispatch list
1680 */
1681 cfq_dispatch_insert(cfqd->queue, rq);
1682
1683 if (!cfqd->active_cic) {
1684 struct cfq_io_context *cic = RQ_CIC(rq);
1685
1686 atomic_long_inc(&cic->ioc->refcount);
1687 cfqd->active_cic = cic;
1688 }
1689
1690 return true;
1691}
1692
1693/*
1694 * Find the cfqq that we need to service and move a request from that to the
1695 * dispatch list
1696 */
1697static int cfq_dispatch_requests(struct request_queue *q, int force)
1698{
1699 struct cfq_data *cfqd = q->elevator->elevator_data;
1700 struct cfq_queue *cfqq;
1701
1702 if (!cfqd->busy_queues)
1703 return 0;
1704
1705 if (unlikely(force))
1706 return cfq_forced_dispatch(cfqd);
1707
1708 cfqq = cfq_select_queue(cfqd);
1709 if (!cfqq)
8e296755
JA
1710 return 0;
1711
2f5cb738 1712 /*
0b182d61 1713 * Dispatch a request from this cfqq, if it is allowed
2f5cb738 1714 */
0b182d61
JA
1715 if (!cfq_dispatch_request(cfqd, cfqq))
1716 return 0;
1717
2f5cb738 1718 cfqq->slice_dispatch++;
b029195d 1719 cfq_clear_cfqq_must_dispatch(cfqq);
22e2c507 1720
2f5cb738
JA
1721 /*
1722 * expire an async queue immediately if it has used up its slice. idle
1723 * queues always expire after 1 dispatch round.
1724 */
1725 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
1726 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
1727 cfq_class_idle(cfqq))) {
1728 cfqq->slice_end = jiffies + 1;
1729 cfq_slice_expired(cfqd, 0);
1da177e4
LT
1730 }
1731
b217a903 1732 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2f5cb738 1733 return 1;
1da177e4
LT
1734}
1735
1da177e4 1736/*
5e705374
JA
1737 * task holds one reference to the queue, dropped when task exits. each rq
1738 * in-flight on this queue also holds a reference, dropped when rq is freed.
1da177e4
LT
1739 *
1740 * queue lock must be held here.
1741 */
1742static void cfq_put_queue(struct cfq_queue *cfqq)
1743{
22e2c507
JA
1744 struct cfq_data *cfqd = cfqq->cfqd;
1745
1746 BUG_ON(atomic_read(&cfqq->ref) <= 0);
1da177e4
LT
1747
1748 if (!atomic_dec_and_test(&cfqq->ref))
1749 return;
1750
7b679138 1751 cfq_log_cfqq(cfqd, cfqq, "put_queue");
1da177e4 1752 BUG_ON(rb_first(&cfqq->sort_list));
22e2c507 1753 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
3b18152c 1754 BUG_ON(cfq_cfqq_on_rr(cfqq));
1da177e4 1755
28f95cbc 1756 if (unlikely(cfqd->active_queue == cfqq)) {
6084cdda 1757 __cfq_slice_expired(cfqd, cfqq, 0);
23e018a1 1758 cfq_schedule_dispatch(cfqd);
28f95cbc 1759 }
22e2c507 1760
1da177e4
LT
1761 kmem_cache_free(cfq_pool, cfqq);
1762}
1763
d6de8be7
JA
1764/*
1765 * Must always be called with the rcu_read_lock() held
1766 */
07416d29
JA
1767static void
1768__call_for_each_cic(struct io_context *ioc,
1769 void (*func)(struct io_context *, struct cfq_io_context *))
1770{
1771 struct cfq_io_context *cic;
1772 struct hlist_node *n;
1773
1774 hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
1775 func(ioc, cic);
1776}
1777
4ac845a2 1778/*
34e6bbf2 1779 * Call func for each cic attached to this ioc.
4ac845a2 1780 */
34e6bbf2 1781static void
4ac845a2
JA
1782call_for_each_cic(struct io_context *ioc,
1783 void (*func)(struct io_context *, struct cfq_io_context *))
1da177e4 1784{
4ac845a2 1785 rcu_read_lock();
07416d29 1786 __call_for_each_cic(ioc, func);
4ac845a2 1787 rcu_read_unlock();
34e6bbf2
FC
1788}
1789
1790static void cfq_cic_free_rcu(struct rcu_head *head)
1791{
1792 struct cfq_io_context *cic;
1793
1794 cic = container_of(head, struct cfq_io_context, rcu_head);
1795
1796 kmem_cache_free(cfq_ioc_pool, cic);
245b2e70 1797 elv_ioc_count_dec(cfq_ioc_count);
34e6bbf2 1798
9a11b4ed
JA
1799 if (ioc_gone) {
1800 /*
1801 * CFQ scheduler is exiting, grab exit lock and check
1802 * the pending io context count. If it hits zero,
1803 * complete ioc_gone and set it back to NULL
1804 */
1805 spin_lock(&ioc_gone_lock);
245b2e70 1806 if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
9a11b4ed
JA
1807 complete(ioc_gone);
1808 ioc_gone = NULL;
1809 }
1810 spin_unlock(&ioc_gone_lock);
1811 }
34e6bbf2 1812}
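
The free path above is one half of a drain-on-exit handshake: cfq_exit() publishes a completion in ioc_gone, and the last cfq_cic_free_rcu() callback to bring the pending io context count to zero signals it under ioc_gone_lock. The userspace sketch below mirrors only that handshake with a mutex and condition variable; all names here (ctx_count, exiting, module_exit_like) are invented for illustration, and the kernel version additionally relies on per-CPU counters and RCU grace periods.

#include <pthread.h>
#include <stdio.h>

static int ctx_count;				/* outstanding io contexts */
static int exiting;				/* set once "module exit" runs */
static pthread_mutex_t gone_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gone_cond = PTHREAD_COND_INITIALIZER;

static void alloc_ctx(void)
{
	pthread_mutex_lock(&gone_lock);
	ctx_count++;
	pthread_mutex_unlock(&gone_lock);
}

/* mirrors cfq_cic_free_rcu(): the last free signals a pending exit */
static void free_ctx(void)
{
	pthread_mutex_lock(&gone_lock);
	if (--ctx_count == 0 && exiting)
		pthread_cond_signal(&gone_cond);
	pthread_mutex_unlock(&gone_lock);
}

/* mirrors cfq_exit(): announce the exit, then wait for the count to drain */
static void module_exit_like(void)
{
	pthread_mutex_lock(&gone_lock);
	exiting = 1;
	while (ctx_count != 0)
		pthread_cond_wait(&gone_cond, &gone_lock);
	pthread_mutex_unlock(&gone_lock);
}

int main(void)
{
	alloc_ctx();
	free_ctx();
	module_exit_like();
	printf("no contexts left, safe to destroy the slab caches\n");
	return 0;
}

The design point being illustrated is that the exiting side must make its intent visible before checking the count, and the freeing side must check the count and the intent under the same lock, so neither can miss the other.
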
4ac845a2 1813
34e6bbf2
FC
1814static void cfq_cic_free(struct cfq_io_context *cic)
1815{
1816 call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
4ac845a2
JA
1817}
1818
1819static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
1820{
1821 unsigned long flags;
1822
1823 BUG_ON(!cic->dead_key);
1824
1825 spin_lock_irqsave(&ioc->lock, flags);
1826 radix_tree_delete(&ioc->radix_root, cic->dead_key);
ffc4e759 1827 hlist_del_rcu(&cic->cic_list);
4ac845a2
JA
1828 spin_unlock_irqrestore(&ioc->lock, flags);
1829
34e6bbf2 1830 cfq_cic_free(cic);
4ac845a2
JA
1831}
1832
d6de8be7
JA
1833/*
1834 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
1835 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
1836 * and ->trim() which is called with the task lock held
1837 */
4ac845a2
JA
1838static void cfq_free_io_context(struct io_context *ioc)
1839{
4ac845a2 1840 /*
34e6bbf2
FC
1841 * ioc->refcount is zero here, or we are called from elv_unregister(),
1842 * so no more cic's are allowed to be linked into this ioc, and it is
1843 * safe to iterate over the known list; we will see all cic's since
1844 * no new ones are added.
4ac845a2 1845 */
07416d29 1846 __call_for_each_cic(ioc, cic_free_func);
1da177e4
LT
1847}
1848
89850f7e 1849static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 1850{
df5fe3e8
JM
1851 struct cfq_queue *__cfqq, *next;
1852
28f95cbc 1853 if (unlikely(cfqq == cfqd->active_queue)) {
6084cdda 1854 __cfq_slice_expired(cfqd, cfqq, 0);
23e018a1 1855 cfq_schedule_dispatch(cfqd);
28f95cbc 1856 }
22e2c507 1857
df5fe3e8
JM
1858 /*
1859 * If this queue was scheduled to merge with another queue, be
1860 * sure to drop the reference taken on that queue (and others in
1861 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
1862 */
1863 __cfqq = cfqq->new_cfqq;
1864 while (__cfqq) {
1865 if (__cfqq == cfqq) {
1866 WARN(1, "cfqq->new_cfqq loop detected\n");
1867 break;
1868 }
1869 next = __cfqq->new_cfqq;
1870 cfq_put_queue(__cfqq);
1871 __cfqq = next;
1872 }
1873
89850f7e
JA
1874 cfq_put_queue(cfqq);
1875}
22e2c507 1876
89850f7e
JA
1877static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
1878 struct cfq_io_context *cic)
1879{
4faa3c81
FC
1880 struct io_context *ioc = cic->ioc;
1881
fc46379d 1882 list_del_init(&cic->queue_list);
4ac845a2
JA
1883
1884 /*
1885 * Make sure key == NULL is seen for dead queues
1886 */
fc46379d 1887 smp_wmb();
4ac845a2 1888 cic->dead_key = (unsigned long) cic->key;
fc46379d
JA
1889 cic->key = NULL;
1890
4faa3c81
FC
1891 if (ioc->ioc_data == cic)
1892 rcu_assign_pointer(ioc->ioc_data, NULL);
1893
ff6657c6
JA
1894 if (cic->cfqq[BLK_RW_ASYNC]) {
1895 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
1896 cic->cfqq[BLK_RW_ASYNC] = NULL;
12a05732
AV
1897 }
1898
ff6657c6
JA
1899 if (cic->cfqq[BLK_RW_SYNC]) {
1900 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
1901 cic->cfqq[BLK_RW_SYNC] = NULL;
12a05732 1902 }
89850f7e
JA
1903}
1904
4ac845a2
JA
1905static void cfq_exit_single_io_context(struct io_context *ioc,
1906 struct cfq_io_context *cic)
89850f7e
JA
1907{
1908 struct cfq_data *cfqd = cic->key;
1909
89850f7e 1910 if (cfqd) {
165125e1 1911 struct request_queue *q = cfqd->queue;
4ac845a2 1912 unsigned long flags;
89850f7e 1913
4ac845a2 1914 spin_lock_irqsave(q->queue_lock, flags);
62c1fe9d
JA
1915
1916 /*
1917 * Ensure we get a fresh copy of the ->key to prevent
1918 * race between exiting task and queue
1919 */
1920 smp_read_barrier_depends();
1921 if (cic->key)
1922 __cfq_exit_single_io_context(cfqd, cic);
1923
4ac845a2 1924 spin_unlock_irqrestore(q->queue_lock, flags);
89850f7e 1925 }
1da177e4
LT
1926}
1927
498d3aa2
JA
1928/*
1929 * The process that ioc belongs to has exited; we need to clean up
1930 * and put the internal structures we hold that belong to that process.
1931 */
e2d74ac0 1932static void cfq_exit_io_context(struct io_context *ioc)
1da177e4 1933{
4ac845a2 1934 call_for_each_cic(ioc, cfq_exit_single_io_context);
1da177e4
LT
1935}
1936
22e2c507 1937static struct cfq_io_context *
8267e268 1938cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1da177e4 1939{
b5deef90 1940 struct cfq_io_context *cic;
1da177e4 1941
94f6030c
CL
1942 cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
1943 cfqd->queue->node);
1da177e4 1944 if (cic) {
22e2c507 1945 cic->last_end_request = jiffies;
553698f9 1946 INIT_LIST_HEAD(&cic->queue_list);
ffc4e759 1947 INIT_HLIST_NODE(&cic->cic_list);
22e2c507
JA
1948 cic->dtor = cfq_free_io_context;
1949 cic->exit = cfq_exit_io_context;
245b2e70 1950 elv_ioc_count_inc(cfq_ioc_count);
1da177e4
LT
1951 }
1952
1953 return cic;
1954}
1955
fd0928df 1956static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
22e2c507
JA
1957{
1958 struct task_struct *tsk = current;
1959 int ioprio_class;
1960
3b18152c 1961 if (!cfq_cfqq_prio_changed(cfqq))
22e2c507
JA
1962 return;
1963
fd0928df 1964 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
22e2c507 1965 switch (ioprio_class) {
fe094d98
JA
1966 default:
1967 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
1968 case IOPRIO_CLASS_NONE:
1969 /*
6d63c275 1970 * no prio set, inherit CPU scheduling settings
fe094d98
JA
1971 */
1972 cfqq->ioprio = task_nice_ioprio(tsk);
6d63c275 1973 cfqq->ioprio_class = task_nice_ioclass(tsk);
fe094d98
JA
1974 break;
1975 case IOPRIO_CLASS_RT:
1976 cfqq->ioprio = task_ioprio(ioc);
1977 cfqq->ioprio_class = IOPRIO_CLASS_RT;
1978 break;
1979 case IOPRIO_CLASS_BE:
1980 cfqq->ioprio = task_ioprio(ioc);
1981 cfqq->ioprio_class = IOPRIO_CLASS_BE;
1982 break;
1983 case IOPRIO_CLASS_IDLE:
1984 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
1985 cfqq->ioprio = 7;
1986 cfq_clear_cfqq_idle_window(cfqq);
1987 break;
22e2c507
JA
1988 }
1989
1990 /*
1991 * keep track of original prio settings in case we have to temporarily
1992 * elevate the priority of this queue
1993 */
1994 cfqq->org_ioprio = cfqq->ioprio;
1995 cfqq->org_ioprio_class = cfqq->ioprio_class;
3b18152c 1996 cfq_clear_cfqq_prio_changed(cfqq);
22e2c507
JA
1997}
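
cfq_init_prio_data() unpacks the scheduling class and the per-class level from the single ioprio value carried by the io_context (via IOPRIO_PRIO_CLASS() and task_ioprio()). The standalone sketch below shows that packing, assuming the conventional Linux layout with the class above a 13-bit shift and the level in the low bits; the macros are re-declared locally only so the example compiles on its own and are not the kernel's definitions.

#include <stdio.h>

enum { CLASS_NONE, CLASS_RT, CLASS_BE, CLASS_IDLE };

#define PRIO_CLASS_SHIFT	13
#define PRIO_VALUE(class, lvl)	(((class) << PRIO_CLASS_SHIFT) | (lvl))
#define PRIO_CLASS(prio)	((prio) >> PRIO_CLASS_SHIFT)
#define PRIO_LEVEL(prio)	((prio) & ((1 << PRIO_CLASS_SHIFT) - 1))

int main(void)
{
	/* a best-effort, level 4 priority, like the CLASS_BE/4 boost target */
	int prio = PRIO_VALUE(CLASS_BE, 4);

	printf("class=%d level=%d\n", PRIO_CLASS(prio), PRIO_LEVEL(prio));
	return 0;
}

In the scheduler proper, the class selects the policy branch in the switch above and the level selects a slot within the CFQ_PRIO_LISTS queues.
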
1998
febffd61 1999static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
22e2c507 2000{
478a82b0
AV
2001 struct cfq_data *cfqd = cic->key;
2002 struct cfq_queue *cfqq;
c1b707d2 2003 unsigned long flags;
35e6077c 2004
caaa5f9f
JA
2005 if (unlikely(!cfqd))
2006 return;
2007
c1b707d2 2008 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
caaa5f9f 2009
ff6657c6 2010 cfqq = cic->cfqq[BLK_RW_ASYNC];
caaa5f9f
JA
2011 if (cfqq) {
2012 struct cfq_queue *new_cfqq;
ff6657c6
JA
2013 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2014 GFP_ATOMIC);
caaa5f9f 2015 if (new_cfqq) {
ff6657c6 2016 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
caaa5f9f
JA
2017 cfq_put_queue(cfqq);
2018 }
22e2c507 2019 }
caaa5f9f 2020
ff6657c6 2021 cfqq = cic->cfqq[BLK_RW_SYNC];
caaa5f9f
JA
2022 if (cfqq)
2023 cfq_mark_cfqq_prio_changed(cfqq);
2024
c1b707d2 2025 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
22e2c507
JA
2026}
2027
fc46379d 2028static void cfq_ioc_set_ioprio(struct io_context *ioc)
22e2c507 2029{
4ac845a2 2030 call_for_each_cic(ioc, changed_ioprio);
fc46379d 2031 ioc->ioprio_changed = 0;
22e2c507
JA
2032}
2033
d5036d77 2034static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 2035 pid_t pid, bool is_sync)
d5036d77
JA
2036{
2037 RB_CLEAR_NODE(&cfqq->rb_node);
2038 RB_CLEAR_NODE(&cfqq->p_node);
2039 INIT_LIST_HEAD(&cfqq->fifo);
2040
2041 atomic_set(&cfqq->ref, 0);
2042 cfqq->cfqd = cfqd;
2043
2044 cfq_mark_cfqq_prio_changed(cfqq);
2045
2046 if (is_sync) {
2047 if (!cfq_class_idle(cfqq))
2048 cfq_mark_cfqq_idle_window(cfqq);
2049 cfq_mark_cfqq_sync(cfqq);
2050 }
2051 cfqq->pid = pid;
2052}
2053
22e2c507 2054static struct cfq_queue *
a6151c3a 2055cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
fd0928df 2056 struct io_context *ioc, gfp_t gfp_mask)
22e2c507 2057{
22e2c507 2058 struct cfq_queue *cfqq, *new_cfqq = NULL;
91fac317 2059 struct cfq_io_context *cic;
22e2c507
JA
2060
2061retry:
4ac845a2 2062 cic = cfq_cic_lookup(cfqd, ioc);
91fac317
VT
2063 /* cic always exists here */
2064 cfqq = cic_to_cfqq(cic, is_sync);
22e2c507 2065
6118b70b
JA
2066 /*
2067 * Always try a new alloc if we fell back to the OOM cfqq
2068 * originally, since it should just be a temporary situation.
2069 */
2070 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2071 cfqq = NULL;
22e2c507
JA
2072 if (new_cfqq) {
2073 cfqq = new_cfqq;
2074 new_cfqq = NULL;
2075 } else if (gfp_mask & __GFP_WAIT) {
2076 spin_unlock_irq(cfqd->queue->queue_lock);
94f6030c 2077 new_cfqq = kmem_cache_alloc_node(cfq_pool,
6118b70b 2078 gfp_mask | __GFP_ZERO,
94f6030c 2079 cfqd->queue->node);
22e2c507 2080 spin_lock_irq(cfqd->queue->queue_lock);
6118b70b
JA
2081 if (new_cfqq)
2082 goto retry;
22e2c507 2083 } else {
94f6030c
CL
2084 cfqq = kmem_cache_alloc_node(cfq_pool,
2085 gfp_mask | __GFP_ZERO,
2086 cfqd->queue->node);
22e2c507
JA
2087 }
2088
6118b70b
JA
2089 if (cfqq) {
2090 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2091 cfq_init_prio_data(cfqq, ioc);
2092 cfq_log_cfqq(cfqd, cfqq, "alloced");
2093 } else
2094 cfqq = &cfqd->oom_cfqq;
22e2c507
JA
2095 }
2096
2097 if (new_cfqq)
2098 kmem_cache_free(cfq_pool, new_cfqq);
2099
22e2c507
JA
2100 return cfqq;
2101}
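
Note the shape of the allocation path above: the queue lock is dropped around the blocking kmem_cache_alloc_node(), re-taken, and the lookup retried, because another task may have installed a cfqq while the lock was released; an allocation made on a losing pass is freed. A userspace analogue of that pattern, with invented names and a plain pthread mutex standing in for the queue lock, might look like this:

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int *cache;			/* the object we look up / install */

static int *get_object(void)
{
	int *obj = NULL, *new_obj = NULL;

	pthread_mutex_lock(&queue_lock);
retry:
	obj = cache;			/* the "cic_to_cfqq()"-style lookup */
	if (!obj) {
		if (new_obj) {
			obj = new_obj;	/* use what we allocated last pass */
			new_obj = NULL;
			cache = obj;
		} else {
			/* may sleep: never allocate with the lock held */
			pthread_mutex_unlock(&queue_lock);
			new_obj = malloc(sizeof(*new_obj));
			pthread_mutex_lock(&queue_lock);
			if (new_obj)
				goto retry;
		}
	}
	pthread_mutex_unlock(&queue_lock);

	free(new_obj);			/* lost the race: discard the spare */
	return obj;
}

int main(void)
{
	printf("object at %p\n", (void *)get_object());
	return 0;
}

The kernel version has one extra escape hatch this sketch omits: if the allocation fails outright, the caller falls back to the statically embedded oom_cfqq instead of returning NULL.
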
2102
c2dea2d1
VT
2103static struct cfq_queue **
2104cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2105{
fe094d98 2106 switch (ioprio_class) {
c2dea2d1
VT
2107 case IOPRIO_CLASS_RT:
2108 return &cfqd->async_cfqq[0][ioprio];
2109 case IOPRIO_CLASS_BE:
2110 return &cfqd->async_cfqq[1][ioprio];
2111 case IOPRIO_CLASS_IDLE:
2112 return &cfqd->async_idle_cfqq;
2113 default:
2114 BUG();
2115 }
2116}
2117
15c31be4 2118static struct cfq_queue *
a6151c3a 2119cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
15c31be4
JA
2120 gfp_t gfp_mask)
2121{
fd0928df
JA
2122 const int ioprio = task_ioprio(ioc);
2123 const int ioprio_class = task_ioprio_class(ioc);
c2dea2d1 2124 struct cfq_queue **async_cfqq = NULL;
15c31be4
JA
2125 struct cfq_queue *cfqq = NULL;
2126
c2dea2d1
VT
2127 if (!is_sync) {
2128 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2129 cfqq = *async_cfqq;
2130 }
2131
6118b70b 2132 if (!cfqq)
fd0928df 2133 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
15c31be4
JA
2134
2135 /*
2136 * pin the queue now that it's allocated, scheduler exit will prune it
2137 */
c2dea2d1 2138 if (!is_sync && !(*async_cfqq)) {
15c31be4 2139 atomic_inc(&cfqq->ref);
c2dea2d1 2140 *async_cfqq = cfqq;
15c31be4
JA
2141 }
2142
2143 atomic_inc(&cfqq->ref);
2144 return cfqq;
2145}
2146
498d3aa2
JA
2147/*
2148 * We drop cfq io contexts lazily, so we may find a dead one.
2149 */
dbecf3ab 2150static void
4ac845a2
JA
2151cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2152 struct cfq_io_context *cic)
dbecf3ab 2153{
4ac845a2
JA
2154 unsigned long flags;
2155
fc46379d 2156 WARN_ON(!list_empty(&cic->queue_list));
597bc485 2157
4ac845a2
JA
2158 spin_lock_irqsave(&ioc->lock, flags);
2159
4faa3c81 2160 BUG_ON(ioc->ioc_data == cic);
597bc485 2161
4ac845a2 2162 radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
ffc4e759 2163 hlist_del_rcu(&cic->cic_list);
4ac845a2
JA
2164 spin_unlock_irqrestore(&ioc->lock, flags);
2165
2166 cfq_cic_free(cic);
dbecf3ab
OH
2167}
2168
e2d74ac0 2169static struct cfq_io_context *
4ac845a2 2170cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
e2d74ac0 2171{
e2d74ac0 2172 struct cfq_io_context *cic;
d6de8be7 2173 unsigned long flags;
4ac845a2 2174 void *k;
e2d74ac0 2175
91fac317
VT
2176 if (unlikely(!ioc))
2177 return NULL;
2178
d6de8be7
JA
2179 rcu_read_lock();
2180
597bc485
JA
2181 /*
2182 * we maintain a last-hit cache, to avoid browsing over the tree
2183 */
4ac845a2 2184 cic = rcu_dereference(ioc->ioc_data);
d6de8be7
JA
2185 if (cic && cic->key == cfqd) {
2186 rcu_read_unlock();
597bc485 2187 return cic;
d6de8be7 2188 }
597bc485 2189
4ac845a2 2190 do {
4ac845a2
JA
2191 cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
2192 rcu_read_unlock();
2193 if (!cic)
2194 break;
be3b0753
OH
2195 /* ->key must be copied to avoid race with cfq_exit_queue() */
2196 k = cic->key;
2197 if (unlikely(!k)) {
4ac845a2 2198 cfq_drop_dead_cic(cfqd, ioc, cic);
d6de8be7 2199 rcu_read_lock();
4ac845a2 2200 continue;
dbecf3ab 2201 }
e2d74ac0 2202
d6de8be7 2203 spin_lock_irqsave(&ioc->lock, flags);
4ac845a2 2204 rcu_assign_pointer(ioc->ioc_data, cic);
d6de8be7 2205 spin_unlock_irqrestore(&ioc->lock, flags);
4ac845a2
JA
2206 break;
2207 } while (1);
e2d74ac0 2208
4ac845a2 2209 return cic;
e2d74ac0
JA
2210}
2211
4ac845a2
JA
2212/*
2213 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
2214 * the process specific cfq io context when entered from the block layer.
2215 * Also adds the cic to a per-cfqd list, used when this queue is removed.
2216 */
febffd61
JA
2217static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
2218 struct cfq_io_context *cic, gfp_t gfp_mask)
e2d74ac0 2219{
0261d688 2220 unsigned long flags;
4ac845a2 2221 int ret;
e2d74ac0 2222
4ac845a2
JA
2223 ret = radix_tree_preload(gfp_mask);
2224 if (!ret) {
2225 cic->ioc = ioc;
2226 cic->key = cfqd;
e2d74ac0 2227
4ac845a2
JA
2228 spin_lock_irqsave(&ioc->lock, flags);
2229 ret = radix_tree_insert(&ioc->radix_root,
2230 (unsigned long) cfqd, cic);
ffc4e759
JA
2231 if (!ret)
2232 hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
4ac845a2 2233 spin_unlock_irqrestore(&ioc->lock, flags);
e2d74ac0 2234
4ac845a2
JA
2235 radix_tree_preload_end();
2236
2237 if (!ret) {
2238 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2239 list_add(&cic->queue_list, &cfqd->cic_list);
2240 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2241 }
e2d74ac0
JA
2242 }
2243
4ac845a2
JA
2244 if (ret)
2245 printk(KERN_ERR "cfq: cic link failed!\n");
fc46379d 2246
4ac845a2 2247 return ret;
e2d74ac0
JA
2248}
2249
1da177e4
LT
2250/*
2251 * Setup general io context and cfq io context. There can be several cfq
2252 * io contexts per general io context, if this process is doing io to more
e2d74ac0 2253 * than one device managed by cfq.
1da177e4
LT
2254 */
2255static struct cfq_io_context *
e2d74ac0 2256cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
1da177e4 2257{
22e2c507 2258 struct io_context *ioc = NULL;
1da177e4 2259 struct cfq_io_context *cic;
1da177e4 2260
22e2c507 2261 might_sleep_if(gfp_mask & __GFP_WAIT);
1da177e4 2262
b5deef90 2263 ioc = get_io_context(gfp_mask, cfqd->queue->node);
1da177e4
LT
2264 if (!ioc)
2265 return NULL;
2266
4ac845a2 2267 cic = cfq_cic_lookup(cfqd, ioc);
e2d74ac0
JA
2268 if (cic)
2269 goto out;
1da177e4 2270
e2d74ac0
JA
2271 cic = cfq_alloc_io_context(cfqd, gfp_mask);
2272 if (cic == NULL)
2273 goto err;
1da177e4 2274
4ac845a2
JA
2275 if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
2276 goto err_free;
2277
1da177e4 2278out:
fc46379d
JA
2279 smp_read_barrier_depends();
2280 if (unlikely(ioc->ioprio_changed))
2281 cfq_ioc_set_ioprio(ioc);
2282
1da177e4 2283 return cic;
4ac845a2
JA
2284err_free:
2285 cfq_cic_free(cic);
1da177e4
LT
2286err:
2287 put_io_context(ioc);
2288 return NULL;
2289}
2290
22e2c507
JA
2291static void
2292cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
1da177e4 2293{
aaf1228d
JA
2294 unsigned long elapsed = jiffies - cic->last_end_request;
2295 unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
db3b5848 2296
22e2c507
JA
2297 cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
2298 cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
2299 cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
2300}
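
The think-time statistics above are a fixed-point exponential moving average: each update keeps 7/8 of the running value and folds in the new sample scaled by 256, so ttime_samples converges toward 256 and ttime_mean tracks recent behaviour while smoothing out single outliers. cfq_update_io_seektime() below applies the same 7/8 scheme to seek distances. A minimal userspace sketch of the arithmetic, with made-up sample values, follows:

#include <stdio.h>

struct ttime {
	unsigned long samples;
	unsigned long total;
	unsigned long mean;
};

/* same fixed-point update as cfq_update_io_thinktime() */
static void ttime_update(struct ttime *t, unsigned long sample)
{
	t->samples = (7 * t->samples + 256) / 8;
	t->total   = (7 * t->total + 256 * sample) / 8;
	t->mean    = (t->total + 128) / t->samples;
}

int main(void)
{
	struct ttime t = { 0, 0, 0 };
	unsigned long demo[] = { 4, 4, 4, 40, 4, 4 };	/* fake think times */

	for (unsigned i = 0; i < sizeof(demo) / sizeof(demo[0]); i++) {
		ttime_update(&t, demo[i]);
		printf("sample=%lu mean=%lu\n", demo[i], t.mean);
	}
	return 0;
}

Because the first update already pushes samples to 32, the mean is defined from the very first request and no separate "warm-up" state is needed.
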
1da177e4 2301
206dc69b 2302static void
b2c18e1e 2303cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
6d048f53 2304 struct request *rq)
206dc69b
JA
2305{
2306 sector_t sdist;
2307 u64 total;
2308
b2c18e1e 2309 if (!cfqq->last_request_pos)
4d00aa47 2310 sdist = 0;
b2c18e1e
JM
2311 else if (cfqq->last_request_pos < blk_rq_pos(rq))
2312 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
206dc69b 2313 else
b2c18e1e 2314 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
206dc69b
JA
2315
2316 /*
2317 * Don't allow the seek distance to get too large from the
2318 * odd fragment, pagein, etc
2319 */
b2c18e1e
JM
2320 if (cfqq->seek_samples <= 60) /* second&third seek */
2321 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
206dc69b 2322 else
b2c18e1e 2323 sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
206dc69b 2324
b2c18e1e
JM
2325 cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
2326 cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
2327 total = cfqq->seek_total + (cfqq->seek_samples/2);
2328 do_div(total, cfqq->seek_samples);
2329 cfqq->seek_mean = (sector_t)total;
e6c5bc73
JM
2330
2331 /*
2332 * If this cfqq is shared between multiple processes, check to
2333 * make sure that those processes are still issuing I/Os within
2334 * the mean seek distance. If not, it may be time to break the
2335 * queues apart again.
2336 */
2337 if (cfq_cfqq_coop(cfqq)) {
2338 if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
2339 cfqq->seeky_start = jiffies;
2340 else if (!CFQQ_SEEKY(cfqq))
2341 cfqq->seeky_start = 0;
2342 }
206dc69b 2343}
1da177e4 2344
22e2c507
JA
2345/*
2346 * Disable idle window if the process thinks too long or seeks so much that
2347 * it doesn't matter
2348 */
2349static void
2350cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2351 struct cfq_io_context *cic)
2352{
7b679138 2353 int old_idle, enable_idle;
1be92f2f 2354
0871714e
JA
2355 /*
2356 * Don't idle for async or idle io prio class
2357 */
2358 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1be92f2f
JA
2359 return;
2360
c265a7f4 2361 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1da177e4 2362
66dac98e 2363 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
718eee05 2364 (sample_valid(cfqq->seek_samples) && CFQQ_SEEKY(cfqq)))
22e2c507
JA
2365 enable_idle = 0;
2366 else if (sample_valid(cic->ttime_samples)) {
718eee05 2367 if (cic->ttime_mean > cfqd->cfq_slice_idle)
22e2c507
JA
2368 enable_idle = 0;
2369 else
2370 enable_idle = 1;
1da177e4
LT
2371 }
2372
7b679138
JA
2373 if (old_idle != enable_idle) {
2374 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
2375 if (enable_idle)
2376 cfq_mark_cfqq_idle_window(cfqq);
2377 else
2378 cfq_clear_cfqq_idle_window(cfqq);
2379 }
22e2c507 2380}
1da177e4 2381
22e2c507
JA
2382/*
2383 * Check if new_cfqq should preempt the currently active queue. Return false
2384 * for no (or if we aren't sure); returning true will cause a preempt.
2385 */
a6151c3a 2386static bool
22e2c507 2387cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
5e705374 2388 struct request *rq)
22e2c507 2389{
6d048f53 2390 struct cfq_queue *cfqq;
22e2c507 2391
6d048f53
JA
2392 cfqq = cfqd->active_queue;
2393 if (!cfqq)
a6151c3a 2394 return false;
22e2c507 2395
6d048f53 2396 if (cfq_slice_used(cfqq))
a6151c3a 2397 return true;
6d048f53
JA
2398
2399 if (cfq_class_idle(new_cfqq))
a6151c3a 2400 return false;
22e2c507
JA
2401
2402 if (cfq_class_idle(cfqq))
a6151c3a 2403 return true;
1e3335de 2404
718eee05
CZ
2405 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD
2406 && new_cfqq->service_tree == cfqq->service_tree)
2407 return true;
2408
374f84ac
JA
2409 /*
2410 * if the new request is sync, but the currently running queue is
2411 * not, let the sync request have priority.
2412 */
5e705374 2413 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
a6151c3a 2414 return true;
1e3335de 2415
374f84ac
JA
2416 /*
2417 * So both queues are sync. Let the new request get disk time if
2418 * it's a metadata request and the current queue is doing regular IO.
2419 */
2420 if (rq_is_meta(rq) && !cfqq->meta_pending)
e6ec4fe2 2421 return true;
22e2c507 2422
3a9a3f6c
DS
2423 /*
2424 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
2425 */
2426 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
a6151c3a 2427 return true;
3a9a3f6c 2428
1e3335de 2429 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
a6151c3a 2430 return false;
1e3335de
JA
2431
2432 /*
2433 * if this request is as-good as one we would expect from the
2434 * current cfqq, let it preempt
2435 */
2058297d 2436 if (cfq_rq_close(cfqd, cfqq, rq) && (!cfq_cfqq_coop(new_cfqq) ||
4b27e1bb
SL
2437 cfqd->busy_queues == 1)) {
2438 /*
2439 * Mark new queue coop_preempt, so its coop flag will not be
2440 * cleared when new queue gets scheduled at the very first time
2441 */
2442 cfq_mark_cfqq_coop_preempt(new_cfqq);
2443 cfq_mark_cfqq_coop(new_cfqq);
a6151c3a 2444 return true;
4b27e1bb 2445 }
1e3335de 2446
a6151c3a 2447 return false;
22e2c507
JA
2448}
2449
2450/*
2451 * cfqq preempts the active queue. if we allowed preempt with no slice left,
2452 * let it have half of its nominal slice.
2453 */
2454static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2455{
7b679138 2456 cfq_log_cfqq(cfqd, cfqq, "preempt");
6084cdda 2457 cfq_slice_expired(cfqd, 1);
22e2c507 2458
bf572256
JA
2459 /*
2460 * Put the new queue at the front of the current list,
2461 * so we know that it will be selected next.
2462 */
2463 BUG_ON(!cfq_cfqq_on_rr(cfqq));
edd75ffd
JA
2464
2465 cfq_service_tree_add(cfqd, cfqq, 1);
bf572256 2466
44f7c160
JA
2467 cfqq->slice_end = 0;
2468 cfq_mark_cfqq_slice_new(cfqq);
22e2c507
JA
2469}
2470
22e2c507 2471/*
5e705374 2472 * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507
JA
2473 * something we should do about it
2474 */
2475static void
5e705374
JA
2476cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2477 struct request *rq)
22e2c507 2478{
5e705374 2479 struct cfq_io_context *cic = RQ_CIC(rq);
12e9fddd 2480
45333d5a 2481 cfqd->rq_queued++;
374f84ac
JA
2482 if (rq_is_meta(rq))
2483 cfqq->meta_pending++;
2484
9c2c38a1 2485 cfq_update_io_thinktime(cfqd, cic);
b2c18e1e 2486 cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a1
JA
2487 cfq_update_idle_window(cfqd, cfqq, cic);
2488
b2c18e1e 2489 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507
JA
2490
2491 if (cfqq == cfqd->active_queue) {
2492 /*
b029195d
JA
2493 * Remember that we saw a request from this process, but
2494 * don't start queuing just yet. Otherwise we risk seeing lots
2495 * of tiny requests, because we disrupt the normal plugging
d6ceb25e
JA
2496 * and merging. If the request is already larger than a single
2497 * page, let it rip immediately. For that case we assume that
2d870722
JA
2498 * merging is already done. Ditto for a busy system that
2499 * has other work pending, don't risk delaying until the
2500 * idle timer unplug to continue working.
22e2c507 2501 */
d6ceb25e 2502 if (cfq_cfqq_wait_request(cfqq)) {
2d870722
JA
2503 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
2504 cfqd->busy_queues > 1) {
d6ceb25e 2505 del_timer(&cfqd->idle_slice_timer);
a7f55792 2506 __blk_run_queue(cfqd->queue);
d6ceb25e 2507 }
b029195d 2508 cfq_mark_cfqq_must_dispatch(cfqq);
d6ceb25e 2509 }
5e705374 2510 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507
JA
2511 /*
2512 * not the active queue - expire current slice if it is
2513 * idle and has expired its mean thinktime, or this new queue
3a9a3f6c
DS
2514 * has some old slice time left and is of higher priority or
2515 * this new queue is RT and the current one is BE
22e2c507
JA
2516 */
2517 cfq_preempt_queue(cfqd, cfqq);
a7f55792 2518 __blk_run_queue(cfqd->queue);
22e2c507 2519 }
1da177e4
LT
2520}
2521
165125e1 2522static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4 2523{
b4878f24 2524 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 2525 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 2526
7b679138 2527 cfq_log_cfqq(cfqd, cfqq, "insert_request");
fd0928df 2528 cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
1da177e4 2529
30996f40 2530 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
22e2c507 2531 list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3d 2532 cfq_add_rq_rb(rq);
22e2c507 2533
5e705374 2534 cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4
LT
2535}
2536
45333d5a
AC
2537/*
2538 * Update hw_tag based on peak queue depth over 50 samples under
2539 * sufficient load.
2540 */
2541static void cfq_update_hw_tag(struct cfq_data *cfqd)
2542{
1a1238a7
SL
2543 struct cfq_queue *cfqq = cfqd->active_queue;
2544
5ad531db
JA
2545 if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
2546 cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
45333d5a
AC
2547
2548 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
5ad531db 2549 rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
45333d5a
AC
2550 return;
2551
1a1238a7
SL
2552 /*
2553 * If the active queue lacks enough requests and can idle, cfq might not
2554 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
2555 * case
2556 */
2557 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
2558 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
2559 CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
2560 return;
2561
45333d5a
AC
2562 if (cfqd->hw_tag_samples++ < 50)
2563 return;
2564
2565 if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
2566 cfqd->hw_tag = 1;
2567 else
2568 cfqd->hw_tag = 0;
2569
2570 cfqd->hw_tag_samples = 0;
2571 cfqd->rq_in_driver_peak = 0;
2572}
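
The heuristic above decides hw_tag once per 50-sample window: it tracks the peak number of requests in the driver and, at the end of the window, treats the device as command-queueing if that peak ever reached CFQ_HW_QUEUE_MIN. A compact sketch of that windowed peak detection, using an invented depth trace, is shown below.

#include <stdio.h>

#define HW_QUEUE_MIN	5

int main(void)
{
	int depths[] = { 1, 2, 7, 3, 6, 1 };	/* fake rq_in_driver samples */
	int samples = 0, peak = 0, hw_tag = 1;	/* hw_tag defaults to on */

	for (int i = 0; i < 50; i++) {
		int depth = depths[i % 6];

		if (depth > peak)
			peak = depth;
		if (++samples < 50)
			continue;
		hw_tag = (peak >= HW_QUEUE_MIN);	/* decide per window */
		samples = 0;
		peak = 0;
	}
	printf("hw_tag=%d\n", hw_tag);
	return 0;
}

The real function additionally refuses to zero hw_tag while the active queue is idling with too few requests queued, since a low peak in that state says nothing about the hardware.
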
2573
165125e1 2574static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4 2575{
5e705374 2576 struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f24 2577 struct cfq_data *cfqd = cfqq->cfqd;
5380a101 2578 const int sync = rq_is_sync(rq);
b4878f24 2579 unsigned long now;
1da177e4 2580
b4878f24 2581 now = jiffies;
7b679138 2582 cfq_log_cfqq(cfqd, cfqq, "complete");
1da177e4 2583
45333d5a
AC
2584 cfq_update_hw_tag(cfqd);
2585
5ad531db 2586 WARN_ON(!cfqd->rq_in_driver[sync]);
6d048f53 2587 WARN_ON(!cfqq->dispatched);
5ad531db 2588 cfqd->rq_in_driver[sync]--;
6d048f53 2589 cfqq->dispatched--;
1da177e4 2590
3ed9a296
JA
2591 if (cfq_cfqq_sync(cfqq))
2592 cfqd->sync_flight--;
2593
365722bb 2594 if (sync) {
5e705374 2595 RQ_CIC(rq)->last_end_request = now;
365722bb
VG
2596 cfqd->last_end_sync_rq = now;
2597 }
caaa5f9f
JA
2598
2599 /*
2600 * If this is the active queue, check if it needs to be expired,
2601 * or if we want to idle in case it has no pending requests.
2602 */
2603 if (cfqd->active_queue == cfqq) {
a36e71f9
JA
2604 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
2605
44f7c160
JA
2606 if (cfq_cfqq_slice_new(cfqq)) {
2607 cfq_set_prio_slice(cfqd, cfqq);
2608 cfq_clear_cfqq_slice_new(cfqq);
2609 }
a36e71f9
JA
2610 /*
2611 * If there are no requests waiting in this queue, and
2612 * there are other queues ready to issue requests, AND
2613 * those other queues are issuing requests within our
2614 * mean seek distance, give them a chance to run instead
2615 * of idling.
2616 */
0871714e 2617 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
6084cdda 2618 cfq_slice_expired(cfqd, 1);
b3b6d040 2619 else if (cfqq_empty && !cfq_close_cooperator(cfqd, cfqq) &&
a36e71f9 2620 sync && !rq_noidle(rq))
6d048f53 2621 cfq_arm_slice_timer(cfqd);
caaa5f9f 2622 }
6d048f53 2623
5ad531db 2624 if (!rq_in_driver(cfqd))
23e018a1 2625 cfq_schedule_dispatch(cfqd);
1da177e4
LT
2626}
2627
22e2c507
JA
2628/*
2629 * we temporarily boost lower priority queues if they are holding fs exclusive
2630 * resources. they are boosted to normal prio (CLASS_BE/4)
2631 */
2632static void cfq_prio_boost(struct cfq_queue *cfqq)
1da177e4 2633{
22e2c507
JA
2634 if (has_fs_excl()) {
2635 /*
2636 * boost idle prio on transactions that would lock out other
2637 * users of the filesystem
2638 */
2639 if (cfq_class_idle(cfqq))
2640 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2641 if (cfqq->ioprio > IOPRIO_NORM)
2642 cfqq->ioprio = IOPRIO_NORM;
2643 } else {
2644 /*
dddb7451 2645 * unboost the queue (if needed)
22e2c507 2646 */
dddb7451
CZ
2647 cfqq->ioprio_class = cfqq->org_ioprio_class;
2648 cfqq->ioprio = cfqq->org_ioprio;
22e2c507 2649 }
22e2c507 2650}
1da177e4 2651
89850f7e 2652static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507 2653{
1b379d8d 2654 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c 2655 cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507 2656 return ELV_MQUEUE_MUST;
3b18152c 2657 }
1da177e4 2658
22e2c507 2659 return ELV_MQUEUE_MAY;
22e2c507
JA
2660}
2661
165125e1 2662static int cfq_may_queue(struct request_queue *q, int rw)
22e2c507
JA
2663{
2664 struct cfq_data *cfqd = q->elevator->elevator_data;
2665 struct task_struct *tsk = current;
91fac317 2666 struct cfq_io_context *cic;
22e2c507
JA
2667 struct cfq_queue *cfqq;
2668
2669 /*
2670 * don't force setup of a queue from here, as a call to may_queue
2671 * does not necessarily imply that a request actually will be queued.
2672 * so just lookup a possibly existing queue, or return 'may queue'
2673 * if that fails
2674 */
4ac845a2 2675 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
2676 if (!cic)
2677 return ELV_MQUEUE_MAY;
2678
b0b78f81 2679 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
22e2c507 2680 if (cfqq) {
fd0928df 2681 cfq_init_prio_data(cfqq, cic->ioc);
22e2c507
JA
2682 cfq_prio_boost(cfqq);
2683
89850f7e 2684 return __cfq_may_queue(cfqq);
22e2c507
JA
2685 }
2686
2687 return ELV_MQUEUE_MAY;
1da177e4
LT
2688}
2689
1da177e4
LT
2690/*
2691 * queue lock held here
2692 */
bb37b94c 2693static void cfq_put_request(struct request *rq)
1da177e4 2694{
5e705374 2695 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 2696
5e705374 2697 if (cfqq) {
22e2c507 2698 const int rw = rq_data_dir(rq);
1da177e4 2699
22e2c507
JA
2700 BUG_ON(!cfqq->allocated[rw]);
2701 cfqq->allocated[rw]--;
1da177e4 2702
5e705374 2703 put_io_context(RQ_CIC(rq)->ioc);
1da177e4 2704
1da177e4 2705 rq->elevator_private = NULL;
5e705374 2706 rq->elevator_private2 = NULL;
1da177e4 2707
1da177e4
LT
2708 cfq_put_queue(cfqq);
2709 }
2710}
2711
df5fe3e8
JM
2712static struct cfq_queue *
2713cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
2714 struct cfq_queue *cfqq)
2715{
2716 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
2717 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d040 2718 cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8
JM
2719 cfq_put_queue(cfqq);
2720 return cic_to_cfqq(cic, 1);
2721}
2722
e6c5bc73
JM
2723static int should_split_cfqq(struct cfq_queue *cfqq)
2724{
2725 if (cfqq->seeky_start &&
2726 time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
2727 return 1;
2728 return 0;
2729}
2730
2731/*
2732 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
2733 * was the last process referring to said cfqq.
2734 */
2735static struct cfq_queue *
2736split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
2737{
2738 if (cfqq_process_refs(cfqq) == 1) {
2739 cfqq->seeky_start = 0;
2740 cfqq->pid = current->pid;
2741 cfq_clear_cfqq_coop(cfqq);
2742 return cfqq;
2743 }
2744
2745 cic_set_cfqq(cic, NULL, 1);
2746 cfq_put_queue(cfqq);
2747 return NULL;
2748}
1da177e4 2749/*
22e2c507 2750 * Allocate cfq data structures associated with this request.
1da177e4 2751 */
22e2c507 2752static int
165125e1 2753cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
1da177e4
LT
2754{
2755 struct cfq_data *cfqd = q->elevator->elevator_data;
2756 struct cfq_io_context *cic;
2757 const int rw = rq_data_dir(rq);
a6151c3a 2758 const bool is_sync = rq_is_sync(rq);
22e2c507 2759 struct cfq_queue *cfqq;
1da177e4
LT
2760 unsigned long flags;
2761
2762 might_sleep_if(gfp_mask & __GFP_WAIT);
2763
e2d74ac0 2764 cic = cfq_get_io_context(cfqd, gfp_mask);
22e2c507 2765
1da177e4
LT
2766 spin_lock_irqsave(q->queue_lock, flags);
2767
22e2c507
JA
2768 if (!cic)
2769 goto queue_fail;
2770
e6c5bc73 2771new_queue:
91fac317 2772 cfqq = cic_to_cfqq(cic, is_sync);
32f2e807 2773 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
fd0928df 2774 cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
91fac317 2775 cic_set_cfqq(cic, cfqq, is_sync);
df5fe3e8 2776 } else {
e6c5bc73
JM
2777 /*
2778 * If the queue was seeky for too long, break it apart.
2779 */
2780 if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
2781 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
2782 cfqq = split_cfqq(cic, cfqq);
2783 if (!cfqq)
2784 goto new_queue;
2785 }
2786
df5fe3e8
JM
2787 /*
2788 * Check to see if this queue is scheduled to merge with
2789 * another, closely cooperating queue. The merging of
2790 * queues happens here as it must be done in process context.
2791 * The reference on new_cfqq was taken in merge_cfqqs.
2792 */
2793 if (cfqq->new_cfqq)
2794 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
91fac317 2795 }
1da177e4
LT
2796
2797 cfqq->allocated[rw]++;
22e2c507 2798 atomic_inc(&cfqq->ref);
1da177e4 2799
5e705374 2800 spin_unlock_irqrestore(q->queue_lock, flags);
3b18152c 2801
5e705374
JA
2802 rq->elevator_private = cic;
2803 rq->elevator_private2 = cfqq;
2804 return 0;
1da177e4 2805
22e2c507
JA
2806queue_fail:
2807 if (cic)
2808 put_io_context(cic->ioc);
89850f7e 2809
23e018a1 2810 cfq_schedule_dispatch(cfqd);
1da177e4 2811 spin_unlock_irqrestore(q->queue_lock, flags);
7b679138 2812 cfq_log(cfqd, "set_request fail");
1da177e4
LT
2813 return 1;
2814}
2815
65f27f38 2816static void cfq_kick_queue(struct work_struct *work)
22e2c507 2817{
65f27f38 2818 struct cfq_data *cfqd =
23e018a1 2819 container_of(work, struct cfq_data, unplug_work);
165125e1 2820 struct request_queue *q = cfqd->queue;
22e2c507 2821
40bb54d1 2822 spin_lock_irq(q->queue_lock);
a7f55792 2823 __blk_run_queue(cfqd->queue);
40bb54d1 2824 spin_unlock_irq(q->queue_lock);
22e2c507
JA
2825}
2826
2827/*
2828 * Timer running if the active_queue is currently idling inside its time slice
2829 */
2830static void cfq_idle_slice_timer(unsigned long data)
2831{
2832 struct cfq_data *cfqd = (struct cfq_data *) data;
2833 struct cfq_queue *cfqq;
2834 unsigned long flags;
3c6bd2f8 2835 int timed_out = 1;
22e2c507 2836
7b679138
JA
2837 cfq_log(cfqd, "idle timer fired");
2838
22e2c507
JA
2839 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2840
fe094d98
JA
2841 cfqq = cfqd->active_queue;
2842 if (cfqq) {
3c6bd2f8
JA
2843 timed_out = 0;
2844
b029195d
JA
2845 /*
2846 * We saw a request before the queue expired, let it through
2847 */
2848 if (cfq_cfqq_must_dispatch(cfqq))
2849 goto out_kick;
2850
22e2c507
JA
2851 /*
2852 * expired
2853 */
44f7c160 2854 if (cfq_slice_used(cfqq))
22e2c507
JA
2855 goto expire;
2856
2857 /*
2858 * only expire and reinvoke request handler, if there are
2859 * other queues with pending requests
2860 */
caaa5f9f 2861 if (!cfqd->busy_queues)
22e2c507 2862 goto out_cont;
22e2c507
JA
2863
2864 /*
2865 * not expired and it has a request pending, let it dispatch
2866 */
75e50984 2867 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 2868 goto out_kick;
22e2c507
JA
2869 }
2870expire:
6084cdda 2871 cfq_slice_expired(cfqd, timed_out);
22e2c507 2872out_kick:
23e018a1 2873 cfq_schedule_dispatch(cfqd);
22e2c507
JA
2874out_cont:
2875 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2876}
2877
3b18152c
JA
2878static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
2879{
2880 del_timer_sync(&cfqd->idle_slice_timer);
23e018a1 2881 cancel_work_sync(&cfqd->unplug_work);
3b18152c 2882}
22e2c507 2883
c2dea2d1
VT
2884static void cfq_put_async_queues(struct cfq_data *cfqd)
2885{
2886 int i;
2887
2888 for (i = 0; i < IOPRIO_BE_NR; i++) {
2889 if (cfqd->async_cfqq[0][i])
2890 cfq_put_queue(cfqd->async_cfqq[0][i]);
2891 if (cfqd->async_cfqq[1][i])
2892 cfq_put_queue(cfqd->async_cfqq[1][i]);
c2dea2d1 2893 }
2389d1ef
ON
2894
2895 if (cfqd->async_idle_cfqq)
2896 cfq_put_queue(cfqd->async_idle_cfqq);
c2dea2d1
VT
2897}
2898
b374d18a 2899static void cfq_exit_queue(struct elevator_queue *e)
1da177e4 2900{
22e2c507 2901 struct cfq_data *cfqd = e->elevator_data;
165125e1 2902 struct request_queue *q = cfqd->queue;
22e2c507 2903
3b18152c 2904 cfq_shutdown_timer_wq(cfqd);
e2d74ac0 2905
d9ff4187 2906 spin_lock_irq(q->queue_lock);
e2d74ac0 2907
d9ff4187 2908 if (cfqd->active_queue)
6084cdda 2909 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
e2d74ac0
JA
2910
2911 while (!list_empty(&cfqd->cic_list)) {
d9ff4187
AV
2912 struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
2913 struct cfq_io_context,
2914 queue_list);
89850f7e
JA
2915
2916 __cfq_exit_single_io_context(cfqd, cic);
d9ff4187 2917 }
e2d74ac0 2918
c2dea2d1 2919 cfq_put_async_queues(cfqd);
15c31be4 2920
d9ff4187 2921 spin_unlock_irq(q->queue_lock);
a90d742e
AV
2922
2923 cfq_shutdown_timer_wq(cfqd);
2924
a90d742e 2925 kfree(cfqd);
1da177e4
LT
2926}
2927
165125e1 2928static void *cfq_init_queue(struct request_queue *q)
1da177e4
LT
2929{
2930 struct cfq_data *cfqd;
718eee05 2931 int i, j;
1da177e4 2932
94f6030c 2933 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
1da177e4 2934 if (!cfqd)
bc1c1169 2935 return NULL;
1da177e4 2936
c0324a02 2937 for (i = 0; i < 2; ++i)
718eee05
CZ
2938 for (j = 0; j < 3; ++j)
2939 cfqd->service_trees[i][j] = CFQ_RB_ROOT;
c0324a02 2940 cfqd->service_tree_idle = CFQ_RB_ROOT;
26a2ac00
JA
2941
2942 /*
2943 * Not strictly needed (since RB_ROOT just clears the node and we
2944 * zeroed cfqd on alloc), but better be safe in case someone decides
2945 * to add magic to the rb code
2946 */
2947 for (i = 0; i < CFQ_PRIO_LISTS; i++)
2948 cfqd->prio_trees[i] = RB_ROOT;
2949
6118b70b
JA
2950 /*
2951 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
2952 * Grab a permanent reference to it, so that the normal code flow
2953 * will not attempt to free it.
2954 */
2955 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
2956 atomic_inc(&cfqd->oom_cfqq.ref);
2957
d9ff4187 2958 INIT_LIST_HEAD(&cfqd->cic_list);
1da177e4 2959
1da177e4 2960 cfqd->queue = q;
1da177e4 2961
22e2c507
JA
2962 init_timer(&cfqd->idle_slice_timer);
2963 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
2964 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
2965
23e018a1 2966 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
22e2c507 2967
1da177e4 2968 cfqd->cfq_quantum = cfq_quantum;
22e2c507
JA
2969 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
2970 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
1da177e4
LT
2971 cfqd->cfq_back_max = cfq_back_max;
2972 cfqd->cfq_back_penalty = cfq_back_penalty;
22e2c507
JA
2973 cfqd->cfq_slice[0] = cfq_slice_async;
2974 cfqd->cfq_slice[1] = cfq_slice_sync;
2975 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
2976 cfqd->cfq_slice_idle = cfq_slice_idle;
963b72fc 2977 cfqd->cfq_latency = 1;
45333d5a 2978 cfqd->hw_tag = 1;
365722bb 2979 cfqd->last_end_sync_rq = jiffies;
bc1c1169 2980 return cfqd;
1da177e4
LT
2981}
2982
2983static void cfq_slab_kill(void)
2984{
d6de8be7
JA
2985 /*
2986 * Caller already ensured that pending RCU callbacks are completed,
2987 * so we should have no busy allocations at this point.
2988 */
1da177e4
LT
2989 if (cfq_pool)
2990 kmem_cache_destroy(cfq_pool);
2991 if (cfq_ioc_pool)
2992 kmem_cache_destroy(cfq_ioc_pool);
2993}
2994
2995static int __init cfq_slab_setup(void)
2996{
0a31bd5f 2997 cfq_pool = KMEM_CACHE(cfq_queue, 0);
1da177e4
LT
2998 if (!cfq_pool)
2999 goto fail;
3000
34e6bbf2 3001 cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
1da177e4
LT
3002 if (!cfq_ioc_pool)
3003 goto fail;
3004
3005 return 0;
3006fail:
3007 cfq_slab_kill();
3008 return -ENOMEM;
3009}
3010
1da177e4
LT
3011/*
3012 * sysfs parts below -->
3013 */
1da177e4
LT
3014static ssize_t
3015cfq_var_show(unsigned int var, char *page)
3016{
3017 return sprintf(page, "%d\n", var);
3018}
3019
3020static ssize_t
3021cfq_var_store(unsigned int *var, const char *page, size_t count)
3022{
3023 char *p = (char *) page;
3024
3025 *var = simple_strtoul(p, &p, 10);
3026 return count;
3027}
3028
1da177e4 3029#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
b374d18a 3030static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1da177e4 3031{ \
3d1ab40f 3032 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
3033 unsigned int __data = __VAR; \
3034 if (__CONV) \
3035 __data = jiffies_to_msecs(__data); \
3036 return cfq_var_show(__data, (page)); \
3037}
3038SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
22e2c507
JA
3039SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3040SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
e572ec7e
AV
3041SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3042SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
22e2c507
JA
3043SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3044SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3045SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3046SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
963b72fc 3047SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
1da177e4
LT
3048#undef SHOW_FUNCTION
3049
3050#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
b374d18a 3051static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
1da177e4 3052{ \
3d1ab40f 3053 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
3054 unsigned int __data; \
3055 int ret = cfq_var_store(&__data, (page), count); \
3056 if (__data < (MIN)) \
3057 __data = (MIN); \
3058 else if (__data > (MAX)) \
3059 __data = (MAX); \
3060 if (__CONV) \
3061 *(__PTR) = msecs_to_jiffies(__data); \
3062 else \
3063 *(__PTR) = __data; \
3064 return ret; \
3065}
3066STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
fe094d98
JA
3067STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3068 UINT_MAX, 1);
3069STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3070 UINT_MAX, 1);
e572ec7e 3071STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
fe094d98
JA
3072STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3073 UINT_MAX, 0);
22e2c507
JA
3074STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3075STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3076STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
fe094d98
JA
3077STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3078 UINT_MAX, 0);
963b72fc 3079STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
1da177e4
LT
3080#undef STORE_FUNCTION
3081
e572ec7e
AV
3082#define CFQ_ATTR(name) \
3083 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3084
3085static struct elv_fs_entry cfq_attrs[] = {
3086 CFQ_ATTR(quantum),
e572ec7e
AV
3087 CFQ_ATTR(fifo_expire_sync),
3088 CFQ_ATTR(fifo_expire_async),
3089 CFQ_ATTR(back_seek_max),
3090 CFQ_ATTR(back_seek_penalty),
3091 CFQ_ATTR(slice_sync),
3092 CFQ_ATTR(slice_async),
3093 CFQ_ATTR(slice_async_rq),
3094 CFQ_ATTR(slice_idle),
963b72fc 3095 CFQ_ATTR(low_latency),
e572ec7e 3096 __ATTR_NULL
1da177e4
LT
3097};
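
The attributes in cfq_attrs[] surface as sysfs files while cfq is the active elevator; they are normally found under /sys/block/<dev>/queue/iosched/. The hedged snippet below reads and rewrites slice_idle for a device assumed here to be sda (the path and device name are assumptions to adjust, and the write requires root).

#include <stdio.h>

#define TUNABLE "/sys/block/sda/queue/iosched/slice_idle"

int main(void)
{
	char buf[32];
	FILE *f = fopen(TUNABLE, "r");

	if (!f) {
		perror(TUNABLE);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("slice_idle = %s", buf);	/* SHOW path reports msecs */
	fclose(f);

	f = fopen(TUNABLE, "w");		/* STORE path parses msecs */
	if (f) {
		fputs("8\n", f);
		fclose(f);
	}
	return 0;
}

The conversion flag in the SHOW_FUNCTION/STORE_FUNCTION macros is what makes the time-based tunables read and write in milliseconds while the scheduler keeps them in jiffies internally.
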
3098
1da177e4
LT
3099static struct elevator_type iosched_cfq = {
3100 .ops = {
3101 .elevator_merge_fn = cfq_merge,
3102 .elevator_merged_fn = cfq_merged_request,
3103 .elevator_merge_req_fn = cfq_merged_requests,
da775265 3104 .elevator_allow_merge_fn = cfq_allow_merge,
b4878f24 3105 .elevator_dispatch_fn = cfq_dispatch_requests,
1da177e4 3106 .elevator_add_req_fn = cfq_insert_request,
b4878f24 3107 .elevator_activate_req_fn = cfq_activate_request,
1da177e4
LT
3108 .elevator_deactivate_req_fn = cfq_deactivate_request,
3109 .elevator_queue_empty_fn = cfq_queue_empty,
3110 .elevator_completed_req_fn = cfq_completed_request,
21183b07
JA
3111 .elevator_former_req_fn = elv_rb_former_request,
3112 .elevator_latter_req_fn = elv_rb_latter_request,
1da177e4
LT
3113 .elevator_set_req_fn = cfq_set_request,
3114 .elevator_put_req_fn = cfq_put_request,
3115 .elevator_may_queue_fn = cfq_may_queue,
3116 .elevator_init_fn = cfq_init_queue,
3117 .elevator_exit_fn = cfq_exit_queue,
fc46379d 3118 .trim = cfq_free_io_context,
1da177e4 3119 },
3d1ab40f 3120 .elevator_attrs = cfq_attrs,
1da177e4
LT
3121 .elevator_name = "cfq",
3122 .elevator_owner = THIS_MODULE,
3123};
3124
3125static int __init cfq_init(void)
3126{
22e2c507
JA
3127 /*
3128 * could be 0 on HZ < 1000 setups
3129 */
3130 if (!cfq_slice_async)
3131 cfq_slice_async = 1;
3132 if (!cfq_slice_idle)
3133 cfq_slice_idle = 1;
3134
1da177e4
LT
3135 if (cfq_slab_setup())
3136 return -ENOMEM;
3137
2fdd82bd 3138 elv_register(&iosched_cfq);
1da177e4 3139
2fdd82bd 3140 return 0;
1da177e4
LT
3141}
3142
3143static void __exit cfq_exit(void)
3144{
6e9a4738 3145 DECLARE_COMPLETION_ONSTACK(all_gone);
1da177e4 3146 elv_unregister(&iosched_cfq);
334e94de 3147 ioc_gone = &all_gone;
fba82272
OH
3148 /* ioc_gone's update must be visible before reading ioc_count */
3149 smp_wmb();
d6de8be7
JA
3150
3151 /*
3152 * this also protects us from entering cfq_slab_kill() with
3153 * pending RCU callbacks
3154 */
245b2e70 3155 if (elv_ioc_count_read(cfq_ioc_count))
9a11b4ed 3156 wait_for_completion(&all_gone);
83521d3e 3157 cfq_slab_kill();
1da177e4
LT
3158}
3159
3160module_init(cfq_init);
3161module_exit(cfq_exit);
3162
3163MODULE_AUTHOR("Jens Axboe");
3164MODULE_LICENSE("GPL");
3165MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");