blkcg: implement blkio_policy_type->cftypes
[linux-2.6-block.git] / block / cfq-iosched.c

/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "cfq.h"

static struct blkio_policy_type blkio_policy_cfq;

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	unsigned total_weight;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, so it has its own service tree.
 */
enum wl_prio_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;
	unsigned int weight;
	unsigned int new_weight;
	bool needs_update;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_workload_slice;
	enum wl_type_t saved_workload;
	enum wl_prio_t saved_serving_prio;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_id;	/* the current blkcg ID */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_prio_t serving_prio;
	enum wl_type_t serving_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position. These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
					    enum wl_prio_t prio,
					    enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (prio == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[prio][type];
}
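
/*
 * Usage sketch (added commentary, not from the original source):
 * service_tree_for(cfqg, RT_WORKLOAD, SYNC_NOIDLE_WORKLOAD) resolves to
 * &cfqg->service_trees[1][1], while any IDLE_WORKLOAD lookup collapses
 * onto the single service_tree_idle regardless of the type passed in.
 */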

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
ae54abed 335 CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
76280aff 336 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
f75edf2d 337 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
3b18152c
JA
338};
339
340#define CFQ_CFQQ_FNS(name) \
341static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
342{ \
fe094d98 343 (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
3b18152c
JA
344} \
345static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
346{ \
fe094d98 347 (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
3b18152c
JA
348} \
349static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
350{ \
fe094d98 351 return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
3b18152c
JA
352}
353
354CFQ_CFQQ_FNS(on_rr);
355CFQ_CFQQ_FNS(wait_request);
b029195d 356CFQ_CFQQ_FNS(must_dispatch);
3b18152c 357CFQ_CFQQ_FNS(must_alloc_slice);
3b18152c
JA
358CFQ_CFQQ_FNS(fifo_expire);
359CFQ_CFQQ_FNS(idle_window);
360CFQ_CFQQ_FNS(prio_changed);
44f7c160 361CFQ_CFQQ_FNS(slice_new);
91fac317 362CFQ_CFQQ_FNS(sync);
a36e71f9 363CFQ_CFQQ_FNS(coop);
ae54abed 364CFQ_CFQQ_FNS(split_coop);
76280aff 365CFQ_CFQQ_FNS(deep);
f75edf2d 366CFQ_CFQQ_FNS(wait_busy);
3b18152c
JA
367#undef CFQ_CFQQ_FNS
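
/*
 * Expansion sketch (added commentary, not from the original source):
 * CFQ_CFQQ_FNS(on_rr) generates three one-line helpers,
 *
 *	cfq_mark_cfqq_on_rr(cfqq)  - set CFQ_CFQQ_FLAG_on_rr in cfqq->flags
 *	cfq_clear_cfqq_on_rr(cfqq) - clear that bit
 *	cfq_cfqq_on_rr(cfqq)       - test the bit, returning 0 or 1
 *
 * and likewise for every other flag in cfqq_state_flags.
 */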

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
{
	return blkg_to_pdata(blkg, &blkio_policy_cfq);
}

static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pdata_to_blkg(cfqg);
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
			blkg_path(cfqg_to_blkg((cfqq)->cfqg)), ##args)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
			blkg_path(cfqg_to_blkg((cfqg))), ##args)	\

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg) { return NULL; }
static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \

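/*
 * Iteration order sketch (added commentary, not from the original
 * source): the nested loop above visits all seven per-group trees as
 *
 *	service_trees[BE_WORKLOAD][ASYNC..SYNC]    (i = 0, j = 0..2)
 *	service_trees[RT_WORKLOAD][ASYNC..SYNC]    (i = 1, j = 0..2)
 *	service_tree_idle                          (i = 2, j = 0)
 */
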
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and the drive supports NCQ,
	 * requests execute in parallel and measuring time is not possible
	 * in most cases unless we drive shallower queue depths, which then
	 * becomes a performance bottleneck. In such cases switch to
	 * providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)
{
	if (wl == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					 struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio,
				       gfp_t gfp_mask);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
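
/*
 * Worked example (added commentary, not from the original source): with
 * HZ = 1000 the sync base slice is cfq_slice_sync = HZ / 10 = 100
 * jiffies and CFQ_SLICE_SCALE is 5, so base_slice / CFQ_SLICE_SCALE is
 * 20. The default ioprio 4 then gets 100 + 20 * (4 - 4) = 100 jiffies,
 * ioprio 0 gets 180, and ioprio 7 gets 40.
 */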

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
	u64 d = delta << CFQ_SERVICE_SHIFT;

	d = d * BLKIO_WEIGHT_DEFAULT;
	do_div(d, cfqg->weight);
	return d;
}
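
/*
 * Scaling sketch (added commentary, not from the original source;
 * BLKIO_WEIGHT_DEFAULT was 500 in this era of the blkcg code): a group
 * at the default weight is charged vdisktime at face value, a
 * weight-1000 group at half rate and a weight-250 group at double rate,
 * so heavier groups accumulate vdisktime more slowly and get scheduled
 * more often.
 */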

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
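
/*
 * Note (added commentary, not from the original source): comparing via
 * a signed delta rather than "vdisktime < min_vdisktime" keeps these
 * helpers correct even after the u64 virtual clock wraps around; CFS
 * plays the same trick with vruntime.
 */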

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * Get the averaged number of queues of RT/BE priority. The average is
 * updated with a formula that gives more weight to higher numbers, so it
 * follows sudden increases quickly and decays slowly.
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
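
/*
 * Worked example (added commentary, not from the original source): with
 * cfq_hist_divisor = 4, mult = 3 and round = 2. If the stored average
 * is 1 and five queues suddenly become busy, the next update gives
 * (3 * 5 + 1 + 2) / 4 = 4, a fast jump; if busy then drops back to 1,
 * the following update gives (3 * 4 + 1 + 2) / 4 = 3, a slow decay.
 */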

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	return cfq_target_latency * cfqg->weight / st->total_weight;
}
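
/*
 * Worked example (added commentary, not from the original source): with
 * the 300 ms target latency, a weight-500 group sharing the disk with
 * two other weight-500 groups (total_weight = 1500) gets a 100 ms group
 * slice per scheduling round.
 */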

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}
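
/*
 * Worked example (added commentary, not from the original source;
 * HZ = 1000): eight default-priority sync queues, each entitled to a
 * 100-jiffy slice, would expect 8 * 100 = 800 jiffies of latency. If
 * the group slice is only 300 jiffies, each queue is compressed to
 * 100 * 300 / 800 = 37 jiffies (bounded below by low_slice) so one full
 * rotation still fits the latency target.
 */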

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
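
/*
 * Worked example (added commentary, not from the original source): with
 * the head at sector 1000, both candidates within back_max and
 * cfq_back_penalty = 2, a request at sector 1010 costs d = 10 while one
 * just behind at 996 costs (1000 - 996) * 2 = 8, so the short backward
 * seek still wins; at sector 994 it would cost 12 and lose to the
 * forward request.
 */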

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
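
/*
 * Worked example (added commentary, not from the original source;
 * HZ = 1000): in a group with four busy queues, a default-priority sync
 * queue (slice 100 jiffies, versus 180 for the best ioprio-0 sync
 * slice) is offset by (4 - 1) * (180 - 100) = 240 jiffies, pushing
 * lower-priority queues further out in the service tree.
 */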

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
	if (cfqg->needs_update) {
		cfqg->weight = cfqg->new_weight;
		cfqg->needs_update = false;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);
	st->total_weight += cfqg->weight;
}

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose everything if it was not continuously
	 * backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	st->total_weight -= cfqg->weight;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_workload_slice = 0;
	cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg),
					 &blkio_policy_cfq, 1);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and the group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/* Can't update vdisktime while group is on service tree */
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
	/* If a new weight was requested, update now, off tree */
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_workload_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_workload = cfqd->serving_type;
		cfqg->saved_serving_prio = cfqd->serving_prio;
	} else
		cfqg->saved_workload_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
					st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
	cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), &blkio_policy_cfq,
					  used_sl, unaccounted_sl);
	cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg), &blkio_policy_cfq);
}

/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = jiffies;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void cfq_update_blkio_group_weight(struct request_queue *q,
					  struct blkio_group *blkg,
					  unsigned int weight)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfqg->new_weight = weight;
	cfqg->needs_update = true;
}

static void cfq_init_blkio_group(struct blkio_group *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfq_init_cfqg_base(cfqg);
	cfqg->weight = blkg->blkcg->weight;
}

/*
 * Search for the cfq group current task belongs to. request_queue lock must
 * be held.
 */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkio_cgroup *blkcg)
{
	struct request_queue *q = cfqd->queue;
	struct cfq_group *cfqg = NULL;

	/* avoid lookup for the common case where there's no blkio cgroup */
	if (blkcg == &blkio_root_cgroup) {
		cfqg = cfqd->root_group;
	} else {
		struct blkio_group *blkg;

		blkg = blkg_lookup_create(blkcg, q, false);
		if (!IS_ERR(blkg))
			cfqg = blkg_to_cfqg(blkg);
	}

	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
		cfqg = cfqq->cfqd->root_group;

	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqg_get(cfqg);
}

#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkio_cgroup *blkcg)
{
	return cfqd->root_group;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
	cfqq->cfqg = cfqg;
}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees hold all pending cfq_queues that have
 * requests waiting to be processed. They are sorted in the order that
 * we will service the queues.
 */
a36e71f9 1133static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 1134 bool add_front)
d9e7620e 1135{
0871714e
JA
1136 struct rb_node **p, *parent;
1137 struct cfq_queue *__cfqq;
d9e7620e 1138 unsigned long rb_key;
c0324a02 1139 struct cfq_rb_root *service_tree;
498d3aa2 1140 int left;
dae739eb 1141 int new_cfqq = 1;
ae30c286 1142
cdb16e8f 1143 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
65b32a57 1144 cfqq_type(cfqq));
0871714e
JA
1145 if (cfq_class_idle(cfqq)) {
1146 rb_key = CFQ_IDLE_DELAY;
aa6f6a3d 1147 parent = rb_last(&service_tree->rb);
0871714e
JA
1148 if (parent && parent != &cfqq->rb_node) {
1149 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1150 rb_key += __cfqq->rb_key;
1151 } else
1152 rb_key += jiffies;
1153 } else if (!add_front) {
b9c8946b
JA
1154 /*
1155 * Get our rb key offset. Subtract any residual slice
1156 * value carried from last service. A negative resid
1157 * count indicates slice overrun, and this should position
1158 * the next service time further away in the tree.
1159 */
edd75ffd 1160 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
b9c8946b 1161 rb_key -= cfqq->slice_resid;
edd75ffd 1162 cfqq->slice_resid = 0;
48e025e6
CZ
1163 } else {
1164 rb_key = -HZ;
aa6f6a3d 1165 __cfqq = cfq_rb_first(service_tree);
48e025e6
CZ
1166 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1167 }
1da177e4 1168
d9e7620e 1169 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
dae739eb 1170 new_cfqq = 0;
99f9628a 1171 /*
d9e7620e 1172 * same position, nothing more to do
99f9628a 1173 */
c0324a02
CZ
1174 if (rb_key == cfqq->rb_key &&
1175 cfqq->service_tree == service_tree)
d9e7620e 1176 return;
1da177e4 1177
aa6f6a3d
CZ
1178 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1179 cfqq->service_tree = NULL;
1da177e4 1180 }
d9e7620e 1181
498d3aa2 1182 left = 1;
0871714e 1183 parent = NULL;
aa6f6a3d
CZ
1184 cfqq->service_tree = service_tree;
1185 p = &service_tree->rb.rb_node;
d9e7620e 1186 while (*p) {
67060e37 1187 struct rb_node **n;
cc09e299 1188
d9e7620e
JA
1189 parent = *p;
1190 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1191
0c534e0a 1192 /*
c0324a02 1193 * sort by key, that represents service time.
0c534e0a 1194 */
c0324a02 1195 if (time_before(rb_key, __cfqq->rb_key))
67060e37 1196 n = &(*p)->rb_left;
c0324a02 1197 else {
67060e37 1198 n = &(*p)->rb_right;
cc09e299 1199 left = 0;
c0324a02 1200 }
67060e37
JA
1201
1202 p = n;
d9e7620e
JA
1203 }
1204
cc09e299 1205 if (left)
aa6f6a3d 1206 service_tree->left = &cfqq->rb_node;
cc09e299 1207
d9e7620e
JA
1208 cfqq->rb_key = rb_key;
1209 rb_link_node(&cfqq->rb_node, parent, p);
aa6f6a3d
CZ
1210 rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1211 service_tree->count++;
20359f27 1212 if (add_front || !new_cfqq)
dae739eb 1213 return;
8184f93e 1214 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1da177e4
LT
1215}

static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *prev;

	cfqq->queued[rq_is_sync(rq)]++;

	elv_rb_add(&cfqq->sort_list, rq);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
					   &blkio_policy_cfq, rq_data_dir(rq),
					   rq_is_sync(rq));
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
					&blkio_policy_cfq,
					cfqg_to_blkg(cfqq->cfqd->serving_group),
					rq_data_dir(rq), rq_is_sync(rq));
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
					   &blkio_policy_cfq, rq_data_dir(rq),
					   rq_is_sync(rq));
	if (rq->cmd_flags & REQ_PRIO) {
		WARN_ON(!cfqq->prio_pending);
		cfqq->prio_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
			   struct bio *bio)
{
	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
					   &blkio_policy_cfq, bio_data_dir(bio),
					   cfq_bio_sync(bio));
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
					   &blkio_policy_cfq, rq_data_dir(next),
					   rq_is_sync(next));

	cfqq = RQ_CFQQ(next);
	/*
	 * all requests of this queue are merged to other queues, delete it
	 * from the service tree. If it's the active_queue,
	 * cfq_dispatch_requests() will choose to expire it or do idle
	 */
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
	    cfqq != cfqd->active_queue)
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with and allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}
1555
812df48d
DS
1556static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1557{
1558 del_timer(&cfqd->idle_slice_timer);
c1768268
TH
1559 cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
1560 &blkio_policy_cfq);
812df48d
DS
1561}
1562
febffd61
JA
1563static void __cfq_set_active_queue(struct cfq_data *cfqd,
1564 struct cfq_queue *cfqq)
22e2c507
JA
1565{
1566 if (cfqq) {
b1ffe737
DS
1567 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1568 cfqd->serving_prio, cfqd->serving_type);
c1768268
TH
1569 cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg),
1570 &blkio_policy_cfq);
62a37f6b
JT
1571 cfqq->slice_start = 0;
1572 cfqq->dispatch_start = jiffies;
1573 cfqq->allocated_slice = 0;
1574 cfqq->slice_end = 0;
1575 cfqq->slice_dispatch = 0;
1576 cfqq->nr_sectors = 0;
1577
1578 cfq_clear_cfqq_wait_request(cfqq);
1579 cfq_clear_cfqq_must_dispatch(cfqq);
1580 cfq_clear_cfqq_must_alloc_slice(cfqq);
1581 cfq_clear_cfqq_fifo_expire(cfqq);
1582 cfq_mark_cfqq_slice_new(cfqq);
1583
1584 cfq_del_timer(cfqd, cfqq);
22e2c507
JA
1585 }
1586
1587 cfqd->active_queue = cfqq;
1588}
1589
7b14e3b5
JA
1590/*
1591 * current cfqq expired its slice (or was too idle), select new one
1592 */
1593static void
1594__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e5ff082e 1595 bool timed_out)
7b14e3b5 1596{
7b679138
JA
1597 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1598
7b14e3b5 1599 if (cfq_cfqq_wait_request(cfqq))
812df48d 1600 cfq_del_timer(cfqd, cfqq);
7b14e3b5 1601
7b14e3b5 1602 cfq_clear_cfqq_wait_request(cfqq);
f75edf2d 1603 cfq_clear_cfqq_wait_busy(cfqq);
7b14e3b5 1604
ae54abed
SL
1605 /*
1606 * If this cfqq is shared between multiple processes, check to
1607 * make sure that those processes are still issuing I/Os within
1608 * the mean seek distance. If not, it may be time to break the
1609 * queues apart again.
1610 */
1611 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1612 cfq_mark_cfqq_split_coop(cfqq);
1613
7b14e3b5 1614 /*
6084cdda 1615 * store what was left of this slice, if the queue idled/timed out
7b14e3b5 1616 */
c553f8e3
SL
1617 if (timed_out) {
1618 if (cfq_cfqq_slice_new(cfqq))
ba5bd520 1619 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
c553f8e3
SL
1620 else
1621 cfqq->slice_resid = cfqq->slice_end - jiffies;
7b679138
JA
1622 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1623 }
7b14e3b5 1624
e5ff082e 1625 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
dae739eb 1626
f04a6424
VG
1627 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1628 cfq_del_cfqq_rr(cfqd, cfqq);
1629
edd75ffd 1630 cfq_resort_rr_list(cfqd, cfqq);
7b14e3b5
JA
1631
1632 if (cfqq == cfqd->active_queue)
1633 cfqd->active_queue = NULL;
1634
1635 if (cfqd->active_cic) {
11a3122f 1636 put_io_context(cfqd->active_cic->icq.ioc);
7b14e3b5
JA
1637 cfqd->active_cic = NULL;
1638 }
7b14e3b5
JA
1639}
1640
e5ff082e 1641static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
7b14e3b5
JA
1642{
1643 struct cfq_queue *cfqq = cfqd->active_queue;
1644
1645 if (cfqq)
e5ff082e 1646 __cfq_slice_expired(cfqd, cfqq, timed_out);
7b14e3b5
JA
1647}
1648
498d3aa2
JA
1649/*
1650 * Get next queue for service. Unless we have a queue preemption,
1651 * we'll simply select the first cfqq in the service tree.
1652 */
6d048f53 1653static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
22e2c507 1654{
c0324a02 1655 struct cfq_rb_root *service_tree =
cdb16e8f 1656 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
65b32a57 1657 cfqd->serving_type);
d9e7620e 1658
f04a6424
VG
1659 if (!cfqd->rq_queued)
1660 return NULL;
1661
1fa8f6d6
VG
1662 /* There is nothing to dispatch */
1663 if (!service_tree)
1664 return NULL;
c0324a02
CZ
1665 if (RB_EMPTY_ROOT(&service_tree->rb))
1666 return NULL;
1667 return cfq_rb_first(service_tree);
6d048f53
JA
1668}
1669
f04a6424
VG
1670static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1671{
25fb5169 1672 struct cfq_group *cfqg;
f04a6424
VG
1673 struct cfq_queue *cfqq;
1674 int i, j;
1675 struct cfq_rb_root *st;
1676
1677 if (!cfqd->rq_queued)
1678 return NULL;
1679
25fb5169
VG
1680 cfqg = cfq_get_next_cfqg(cfqd);
1681 if (!cfqg)
1682 return NULL;
1683
f04a6424
VG
1684 for_each_cfqg_st(cfqg, i, j, st)
1685 if ((cfqq = cfq_rb_first(st)) != NULL)
1686 return cfqq;
1687 return NULL;
1688}
1689
498d3aa2
JA
1690/*
1691 * Get and set a new active queue for service.
1692 */
a36e71f9
JA
1693static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1694 struct cfq_queue *cfqq)
6d048f53 1695{
e00ef799 1696 if (!cfqq)
a36e71f9 1697 cfqq = cfq_get_next_queue(cfqd);
6d048f53 1698
22e2c507 1699 __cfq_set_active_queue(cfqd, cfqq);
3b18152c 1700 return cfqq;
22e2c507
JA
1701}
1702
d9e7620e
JA
1703static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1704 struct request *rq)
1705{
83096ebf
TH
1706 if (blk_rq_pos(rq) >= cfqd->last_position)
1707 return blk_rq_pos(rq) - cfqd->last_position;
d9e7620e 1708 else
83096ebf 1709 return cfqd->last_position - blk_rq_pos(rq);
d9e7620e
JA
1710}
1711
b2c18e1e 1712static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e9ce335d 1713 struct request *rq)
6d048f53 1714{
e9ce335d 1715 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
6d048f53
JA
1716}
1717
a36e71f9
JA
1718static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1719 struct cfq_queue *cur_cfqq)
1720{
f2d1f0ae 1721 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
a36e71f9
JA
1722 struct rb_node *parent, *node;
1723 struct cfq_queue *__cfqq;
1724 sector_t sector = cfqd->last_position;
1725
1726 if (RB_EMPTY_ROOT(root))
1727 return NULL;
1728
1729 /*
1730 * First, if we find a request starting at the end of the last
1731 * request, choose it.
1732 */
f2d1f0ae 1733 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
a36e71f9
JA
1734 if (__cfqq)
1735 return __cfqq;
1736
1737 /*
1738 * If the exact sector wasn't found, the parent of the NULL leaf
1739 * will contain the closest sector.
1740 */
1741 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
e9ce335d 1742 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
1743 return __cfqq;
1744
2e46e8b2 1745 if (blk_rq_pos(__cfqq->next_rq) < sector)
a36e71f9
JA
1746 node = rb_next(&__cfqq->p_node);
1747 else
1748 node = rb_prev(&__cfqq->p_node);
1749 if (!node)
1750 return NULL;
1751
1752 __cfqq = rb_entry(node, struct cfq_queue, p_node);
e9ce335d 1753 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
1754 return __cfqq;
1755
1756 return NULL;
1757}
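/*
 * Illustrative example of the search above: if cfqd->last_position is
 * sector 10000 and the prio tree holds queues whose next_rq sectors are
 * 4000 and 10100, the exact lookup fails, the parent of the NULL leaf
 * is the 10100 queue, and since |10100 - 10000| <= CFQQ_CLOSE_THR
 * (8 * 1024 sectors) that queue is returned as the close candidate.
 * Only if the parent is too far away is the single neighbor on the
 * other side of last_position tried.
 */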
1758
1759/*
1760 * cfqd - obvious
1761 * cur_cfqq - passed in so that we don't decide that the current queue is
1762 * closely cooperating with itself.
1763 *
1764 * So, basically we're assuming that cur_cfqq has dispatched at least
1765 * one request, and that cfqd->last_position reflects a position on the disk
1766 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
1767 * assumption.
1768 */
1769static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
b3b6d040 1770 struct cfq_queue *cur_cfqq)
6d048f53 1771{
a36e71f9
JA
1772 struct cfq_queue *cfqq;
1773
39c01b21
DS
1774 if (cfq_class_idle(cur_cfqq))
1775 return NULL;
e6c5bc73
JM
1776 if (!cfq_cfqq_sync(cur_cfqq))
1777 return NULL;
1778 if (CFQQ_SEEKY(cur_cfqq))
1779 return NULL;
1780
b9d8f4c7
GJ
1781 /*
1782 * Don't search priority tree if it's the only queue in the group.
1783 */
1784 if (cur_cfqq->cfqg->nr_cfqq == 1)
1785 return NULL;
1786
6d048f53 1787 /*
d9e7620e
JA
1788 * We should notice if some of the queues are cooperating, e.g.
1789 * working closely on the same area of the disk. In that case,
1790 * we can group them together so we don't waste time idling.
6d048f53 1791 */
a36e71f9
JA
1792 cfqq = cfqq_close(cfqd, cur_cfqq);
1793 if (!cfqq)
1794 return NULL;
1795
8682e1f1
VG
1796 /* If new queue belongs to different cfq_group, don't choose it */
1797 if (cur_cfqq->cfqg != cfqq->cfqg)
1798 return NULL;
1799
df5fe3e8
JM
1800 /*
1801 * It only makes sense to merge sync queues.
1802 */
1803 if (!cfq_cfqq_sync(cfqq))
1804 return NULL;
e6c5bc73
JM
1805 if (CFQQ_SEEKY(cfqq))
1806 return NULL;
df5fe3e8 1807
c0324a02
CZ
1808 /*
1809 * Do not merge queues of different priority classes
1810 */
1811 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1812 return NULL;
1813
a36e71f9 1814 return cfqq;
6d048f53
JA
1815}
1816
a6d44e98
CZ
1817/*
1818 * Determine whether we should enforce idle window for this queue.
1819 */
1820
1821static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1822{
1823 enum wl_prio_t prio = cfqq_prio(cfqq);
718eee05 1824 struct cfq_rb_root *service_tree = cfqq->service_tree;
a6d44e98 1825
f04a6424
VG
1826 BUG_ON(!service_tree);
1827 BUG_ON(!service_tree->count);
1828
b6508c16
VG
1829 if (!cfqd->cfq_slice_idle)
1830 return false;
1831
a6d44e98
CZ
1832 /* We never do for idle class queues. */
1833 if (prio == IDLE_WORKLOAD)
1834 return false;
1835
1836 /* We do for queues that were marked with idle window flag. */
3c764b7a
SL
1837 if (cfq_cfqq_idle_window(cfqq) &&
1838 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
a6d44e98
CZ
1839 return true;
1840
1841 /*
1842 * Otherwise, we do only if they are the last ones
1843 * in their service tree.
1844 */
f5f2b6ce
SL
1845 if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
1846 !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
c1e44756 1847 return true;
b1ffe737
DS
1848 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1849 service_tree->count);
c1e44756 1850 return false;
a6d44e98
CZ
1851}
1852
6d048f53 1853static void cfq_arm_slice_timer(struct cfq_data *cfqd)
22e2c507 1854{
1792669c 1855 struct cfq_queue *cfqq = cfqd->active_queue;
c5869807 1856 struct cfq_io_cq *cic;
80bdf0c7 1857 unsigned long sl, group_idle = 0;
7b14e3b5 1858
a68bbddb 1859 /*
f7d7b7a7
JA
1860 * SSD device without seek penalty, disable idling. But only do so
1861 * for devices that support queuing, otherwise we still have a problem
1862 * with sync vs async workloads.
a68bbddb 1863 */
f7d7b7a7 1864 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
a68bbddb
JA
1865 return;
1866
dd67d051 1867 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
6d048f53 1868 WARN_ON(cfq_cfqq_slice_new(cfqq));
22e2c507
JA
1869
1870 /*
1871 * idle is disabled, either manually or by past process history
1872 */
80bdf0c7
VG
1873 if (!cfq_should_idle(cfqd, cfqq)) {
1874 /* no queue idling. Check for group idling */
1875 if (cfqd->cfq_group_idle)
1876 group_idle = cfqd->cfq_group_idle;
1877 else
1878 return;
1879 }
6d048f53 1880
7b679138 1881 /*
8e550632 1882 * still active requests from this queue, don't idle
7b679138 1883 */
8e550632 1884 if (cfqq->dispatched)
7b679138
JA
1885 return;
1886
22e2c507
JA
1887 /*
1888 * task has exited, don't wait
1889 */
206dc69b 1890 cic = cfqd->active_cic;
f6e8d01b 1891 if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
6d048f53
JA
1892 return;
1893
355b659c
CZ
1894 /*
1895 * If our average think time is larger than the remaining time
1896 * slice, then don't idle. This avoids overrunning the allotted
1897 * time slice.
1898 */
383cd721
SL
1899 if (sample_valid(cic->ttime.ttime_samples) &&
1900 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
fd16d263 1901 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
383cd721 1902 cic->ttime.ttime_mean);
355b659c 1903 return;
b1ffe737 1904 }
355b659c 1905
80bdf0c7
VG
1906 /* There are other queues in the group, don't do group idle */
1907 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1908 return;
1909
3b18152c 1910 cfq_mark_cfqq_wait_request(cfqq);
22e2c507 1911
80bdf0c7
VG
1912 if (group_idle)
1913 sl = cfqd->cfq_group_idle;
1914 else
1915 sl = cfqd->cfq_slice_idle;
206dc69b 1916
7b14e3b5 1917 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
c1768268
TH
1918 cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
1919 &blkio_policy_cfq);
80bdf0c7
VG
1920 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1921 group_idle ? 1 : 0);
1da177e4
LT
1922}
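/*
 * Worked example (illustrative, using the defaults at the top of this
 * file): cfq_slice_idle = cfq_group_idle = HZ / 125, i.e. 8ms, so the
 * idle_slice_timer is typically armed for jiffies + 8ms. If neither the
 * queue nor the group should idle, or the device is non-rotational with
 * queuing support, the timer is never armed and dispatch moves on
 * immediately.
 */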
1923
498d3aa2
JA
1924/*
1925 * Move request from internal lists to the request queue dispatch list.
1926 */
165125e1 1927static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1da177e4 1928{
3ed9a296 1929 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 1930 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 1931
7b679138
JA
1932 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1933
06d21886 1934 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
5380a101 1935 cfq_remove_request(rq);
6d048f53 1936 cfqq->dispatched++;
80bdf0c7 1937 (RQ_CFQG(rq))->dispatched++;
5380a101 1938 elv_dispatch_sort(q, rq);
3ed9a296 1939
53c583d2 1940 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
c4e7893e 1941 cfqq->nr_sectors += blk_rq_sectors(rq);
0381411e 1942 cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
c1768268
TH
1943 &blkio_policy_cfq, blk_rq_bytes(rq),
1944 rq_data_dir(rq), rq_is_sync(rq));
1da177e4
LT
1945}
1946
1947/*
1948 * return expired entry, or NULL to just start from scratch in rbtree
1949 */
febffd61 1950static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1da177e4 1951{
30996f40 1952 struct request *rq = NULL;
1da177e4 1953
3b18152c 1954 if (cfq_cfqq_fifo_expire(cfqq))
1da177e4 1955 return NULL;
cb887411
JA
1956
1957 cfq_mark_cfqq_fifo_expire(cfqq);
1958
89850f7e
JA
1959 if (list_empty(&cfqq->fifo))
1960 return NULL;
1da177e4 1961
89850f7e 1962 rq = rq_entry_fifo(cfqq->fifo.next);
30996f40 1963 if (time_before(jiffies, rq_fifo_time(rq)))
7b679138 1964 rq = NULL;
1da177e4 1965
30996f40 1966 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
6d048f53 1967 return rq;
1da177e4
LT
1968}
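/*
 * Illustrative example: with the defaults, a sync request enqueued at
 * time T is given a FIFO deadline of T + HZ/8 (125ms) and an async one
 * T + HZ/4 (250ms) in cfq_insert_request(). cfq_check_fifo() hands a
 * request back only once that deadline has passed, and the fifo_expire
 * flag limits the check to once per slice.
 */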
1969
22e2c507
JA
1970static inline int
1971cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1972{
1973 const int base_rq = cfqd->cfq_slice_async_rq;
1da177e4 1974
22e2c507 1975 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1da177e4 1976
b9f8ce05 1977 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
1da177e4
LT
1978}
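/*
 * Worked example (illustrative): with the default cfq_slice_async_rq
 * of 2, a BE queue at ioprio 4 may dispatch up to
 * 2 * 2 * (IOPRIO_BE_NR - 4) = 16 requests in one round, while the
 * highest priority (ioprio 0) gets 2 * 2 * 8 = 32.
 */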
1979
df5fe3e8
JM
1980/*
1981 * Must be called with the queue_lock held.
1982 */
1983static int cfqq_process_refs(struct cfq_queue *cfqq)
1984{
1985 int process_refs, io_refs;
1986
1987 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
30d7b944 1988 process_refs = cfqq->ref - io_refs;
df5fe3e8
JM
1989 BUG_ON(process_refs < 0);
1990 return process_refs;
1991}
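/*
 * Illustrative example: a cfqq with ref == 5 while three requests are
 * allocated against it (io_refs == 3) has two process references left.
 * Only process references count when deciding whether a merge chain is
 * still safe to follow below.
 */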
1992
1993static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
1994{
e6c5bc73 1995 int process_refs, new_process_refs;
df5fe3e8
JM
1996 struct cfq_queue *__cfqq;
1997
c10b61f0
JM
1998 /*
1999 * If there are no process references on the new_cfqq, then it is
2000 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2001 * chain may have dropped their last reference (not just their
2002 * last process reference).
2003 */
2004 if (!cfqq_process_refs(new_cfqq))
2005 return;
2006
df5fe3e8
JM
2007 /* Avoid a circular list and skip interim queue merges */
2008 while ((__cfqq = new_cfqq->new_cfqq)) {
2009 if (__cfqq == cfqq)
2010 return;
2011 new_cfqq = __cfqq;
2012 }
2013
2014 process_refs = cfqq_process_refs(cfqq);
c10b61f0 2015 new_process_refs = cfqq_process_refs(new_cfqq);
df5fe3e8
JM
2016 /*
2017 * If the process for the cfqq has gone away, there is no
2018 * sense in merging the queues.
2019 */
c10b61f0 2020 if (process_refs == 0 || new_process_refs == 0)
df5fe3e8
JM
2021 return;
2022
e6c5bc73
JM
2023 /*
2024 * Merge in the direction of the lesser amount of work.
2025 */
e6c5bc73
JM
2026 if (new_process_refs >= process_refs) {
2027 cfqq->new_cfqq = new_cfqq;
30d7b944 2028 new_cfqq->ref += process_refs;
e6c5bc73
JM
2029 } else {
2030 new_cfqq->new_cfqq = cfqq;
30d7b944 2031 cfqq->ref += new_process_refs;
e6c5bc73 2032 }
df5fe3e8
JM
2033}
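/*
 * Illustrative example of the merge direction: if cfqq has one process
 * reference and new_cfqq has three, it is cheaper to move the single
 * process, so cfqq->new_cfqq is pointed at new_cfqq and new_cfqq->ref
 * grows by that one process reference; later lookups then migrate the
 * process onto new_cfqq via cfq_merge_cfqqs().
 */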
2034
cdb16e8f 2035static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
65b32a57 2036 struct cfq_group *cfqg, enum wl_prio_t prio)
718eee05
CZ
2037{
2038 struct cfq_queue *queue;
2039 int i;
2040 bool key_valid = false;
2041 unsigned long lowest_key = 0;
2042 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2043
65b32a57
VG
2044 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2045 /* select the one with lowest rb_key */
2046 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
718eee05
CZ
2047 if (queue &&
2048 (!key_valid || time_before(queue->rb_key, lowest_key))) {
2049 lowest_key = queue->rb_key;
2050 cur_best = i;
2051 key_valid = true;
2052 }
2053 }
2054
2055 return cur_best;
2056}
2057
cdb16e8f 2058static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
718eee05 2059{
718eee05
CZ
2060 unsigned slice;
2061 unsigned count;
cdb16e8f 2062 struct cfq_rb_root *st;
58ff82f3 2063 unsigned group_slice;
e4ea0c16 2064 enum wl_prio_t original_prio = cfqd->serving_prio;
1fa8f6d6 2065
718eee05 2066 /* Choose next priority. RT > BE > IDLE */
58ff82f3 2067 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
718eee05 2068 cfqd->serving_prio = RT_WORKLOAD;
58ff82f3 2069 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
718eee05
CZ
2070 cfqd->serving_prio = BE_WORKLOAD;
2071 else {
2072 cfqd->serving_prio = IDLE_WORKLOAD;
2073 cfqd->workload_expires = jiffies + 1;
2074 return;
2075 }
2076
e4ea0c16
SL
2077 if (original_prio != cfqd->serving_prio)
2078 goto new_workload;
2079
718eee05
CZ
2080 /*
2081 * For RT and BE, we have to choose also the type
2082 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2083 * expiration time
2084 */
65b32a57 2085 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
cdb16e8f 2086 count = st->count;
718eee05
CZ
2087
2088 /*
65b32a57 2089 * check workload expiration, and that we still have other queues ready
718eee05 2090 */
65b32a57 2091 if (count && !time_after(jiffies, cfqd->workload_expires))
718eee05
CZ
2092 return;
2093
e4ea0c16 2094new_workload:
718eee05
CZ
2095 /* otherwise select new workload type */
2096 cfqd->serving_type =
65b32a57
VG
2097 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2098 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
cdb16e8f 2099 count = st->count;
718eee05
CZ
2100
2101 /*
2102 * the workload slice is computed as a fraction of target latency
2103 * proportional to the number of queues in that workload, over
2104 * all the queues in the same priority class
2105 */
58ff82f3
VG
2106 group_slice = cfq_group_slice(cfqd, cfqg);
2107
2108 slice = group_slice * count /
2109 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2110 cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
718eee05 2111
f26bd1f0
VG
2112 if (cfqd->serving_type == ASYNC_WORKLOAD) {
2113 unsigned int tmp;
2114
2115 /*
2116 * Async queues are currently system wide. Just taking the
2117 * proportion of queues within the same group will lead to a higher
2118 * async ratio system wide, as the root group is generally going
2119 * to have higher weight. A more accurate approach would be to
2120 * calculate the system-wide async/sync ratio.
2121 */
2122 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2123 tmp = tmp/cfqd->busy_queues;
2124 slice = min_t(unsigned, slice, tmp);
2125
718eee05
CZ
2126 /* async workload slice is scaled down according to
2127 * the sync/async slice ratio. */
2128 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
f26bd1f0 2129 } else
718eee05
CZ
2130 /* sync workload slice is at least 2 * cfq_slice_idle */
2131 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2132
2133 slice = max_t(unsigned, slice, CFQ_MIN_TT);
b1ffe737 2134 cfq_log(cfqd, "workload slice:%d", slice);
718eee05
CZ
2135 cfqd->workload_expires = jiffies + slice;
2136}
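/*
 * Worked example (illustrative, single group, so group_slice is the
 * full 300ms target latency): with 4 busy BE queues of which 2 sit on
 * the chosen service tree, slice = 300ms * 2 / 4 = 150ms. An ASYNC
 * workload is further scaled by cfq_slice[0]/cfq_slice[1], a 2/5 ratio
 * with the defaults, while a sync workload is floored at
 * 2 * cfq_slice_idle.
 */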
2137
1fa8f6d6
VG
2138static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2139{
2140 struct cfq_rb_root *st = &cfqd->grp_service_tree;
25bc6b07 2141 struct cfq_group *cfqg;
1fa8f6d6
VG
2142
2143 if (RB_EMPTY_ROOT(&st->rb))
2144 return NULL;
25bc6b07 2145 cfqg = cfq_rb_first_group(st);
25bc6b07
VG
2146 update_min_vdisktime(st);
2147 return cfqg;
1fa8f6d6
VG
2148}
2149
cdb16e8f
VG
2150static void cfq_choose_cfqg(struct cfq_data *cfqd)
2151{
1fa8f6d6
VG
2152 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2153
2154 cfqd->serving_group = cfqg;
dae739eb
VG
2155
2156 /* Restore the workload type data */
2157 if (cfqg->saved_workload_slice) {
2158 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2159 cfqd->serving_type = cfqg->saved_workload;
2160 cfqd->serving_prio = cfqg->saved_serving_prio;
66ae2919
GJ
2161 } else
2162 cfqd->workload_expires = jiffies - 1;
2163
1fa8f6d6 2164 choose_service_tree(cfqd, cfqg);
cdb16e8f
VG
2165}
2166
22e2c507 2167/*
498d3aa2
JA
2168 * Select a queue for service. If we have a current active queue,
2169 * check whether to continue servicing it, or retrieve and set a new one.
22e2c507 2170 */
1b5ed5e1 2171static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1da177e4 2172{
a36e71f9 2173 struct cfq_queue *cfqq, *new_cfqq = NULL;
1da177e4 2174
22e2c507
JA
2175 cfqq = cfqd->active_queue;
2176 if (!cfqq)
2177 goto new_queue;
1da177e4 2178
f04a6424
VG
2179 if (!cfqd->rq_queued)
2180 return NULL;
c244bb50
VG
2181
2182 /*
2183 * We were waiting for group to get backlogged. Expire the queue
2184 */
2185 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2186 goto expire;
2187
22e2c507 2188 /*
6d048f53 2189 * The active queue has run out of time, expire it and select new.
22e2c507 2190 */
7667aa06
VG
2191 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2192 /*
2193 * If the slice had not expired at the completion of the last
2194 * request we might not have turned on the wait_busy flag. Don't
2195 * expire the queue yet. Allow the group to get backlogged.
2196 *
2197 * The very fact that we have used the slice means we
2198 * have been idling all along on this queue and it should be
2199 * ok to wait for this request to complete.
2200 */
82bbbf28
VG
2201 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2202 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2203 cfqq = NULL;
7667aa06 2204 goto keep_queue;
82bbbf28 2205 } else
80bdf0c7 2206 goto check_group_idle;
7667aa06 2207 }
1da177e4 2208
22e2c507 2209 /*
6d048f53
JA
2210 * The active queue has requests and isn't expired, allow it to
2211 * dispatch.
22e2c507 2212 */
dd67d051 2213 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 2214 goto keep_queue;
6d048f53 2215
a36e71f9
JA
2216 /*
2217 * If another queue has a request waiting within our mean seek
2218 * distance, let it run. The expire code will check for close
2219 * cooperators and put the close queue at the front of the service
df5fe3e8 2220 * tree. If possible, merge the expiring queue with the new cfqq.
a36e71f9 2221 */
b3b6d040 2222 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
df5fe3e8
JM
2223 if (new_cfqq) {
2224 if (!cfqq->new_cfqq)
2225 cfq_setup_merge(cfqq, new_cfqq);
a36e71f9 2226 goto expire;
df5fe3e8 2227 }
a36e71f9 2228
6d048f53
JA
2229 /*
2230 * No requests pending. If the active queue still has requests in
2231 * flight or is idling for a new request, allow either of these
2232 * conditions to happen (or time out) before selecting a new queue.
2233 */
80bdf0c7
VG
2234 if (timer_pending(&cfqd->idle_slice_timer)) {
2235 cfqq = NULL;
2236 goto keep_queue;
2237 }
2238
8e1ac665
SL
2239 /*
2240 * This is a deep seek queue, but the device is much faster than
2241 * the queue can deliver; don't idle.
2242 */
2243 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2244 (cfq_cfqq_slice_new(cfqq) ||
2245 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2246 cfq_clear_cfqq_deep(cfqq);
2247 cfq_clear_cfqq_idle_window(cfqq);
2248 }
2249
80bdf0c7
VG
2250 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2251 cfqq = NULL;
2252 goto keep_queue;
2253 }
2254
2255 /*
2256 * If group idle is enabled and there are requests dispatched from
2257 * this group, wait for requests to complete.
2258 */
2259check_group_idle:
7700fc4f
SL
2260 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2261 cfqq->cfqg->dispatched &&
2262 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
caaa5f9f
JA
2263 cfqq = NULL;
2264 goto keep_queue;
22e2c507
JA
2265 }
2266
3b18152c 2267expire:
e5ff082e 2268 cfq_slice_expired(cfqd, 0);
3b18152c 2269new_queue:
718eee05
CZ
2270 /*
2271 * Current queue expired. Check if we have to switch to a new
2272 * service tree
2273 */
2274 if (!new_cfqq)
cdb16e8f 2275 cfq_choose_cfqg(cfqd);
718eee05 2276
a36e71f9 2277 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
22e2c507 2278keep_queue:
3b18152c 2279 return cfqq;
22e2c507
JA
2280}
2281
febffd61 2282static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
d9e7620e
JA
2283{
2284 int dispatched = 0;
2285
2286 while (cfqq->next_rq) {
2287 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2288 dispatched++;
2289 }
2290
2291 BUG_ON(!list_empty(&cfqq->fifo));
f04a6424
VG
2292
2293 /* By default cfqq is not expired if it is empty. Do it explicitly */
e5ff082e 2294 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
d9e7620e
JA
2295 return dispatched;
2296}
2297
498d3aa2
JA
2298/*
2299 * Drain our current requests. Used for barriers and when switching
2300 * io schedulers on-the-fly.
2301 */
d9e7620e 2302static int cfq_forced_dispatch(struct cfq_data *cfqd)
1b5ed5e1 2303{
0871714e 2304 struct cfq_queue *cfqq;
d9e7620e 2305 int dispatched = 0;
cdb16e8f 2306
3440c49f 2307 /* Expire the timeslice of the current active queue first */
e5ff082e 2308 cfq_slice_expired(cfqd, 0);
3440c49f
DS
2309 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2310 __cfq_set_active_queue(cfqd, cfqq);
f04a6424 2311 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3440c49f 2312 }
1b5ed5e1 2313
1b5ed5e1
TH
2314 BUG_ON(cfqd->busy_queues);
2315
6923715a 2316 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1b5ed5e1
TH
2317 return dispatched;
2318}
2319
abc3c744
SL
2320static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2321 struct cfq_queue *cfqq)
2322{
2323 /* the queue hasn't finished any request, can't estimate */
2324 if (cfq_cfqq_slice_new(cfqq))
c1e44756 2325 return true;
abc3c744
SL
2326 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2327 cfqq->slice_end))
c1e44756 2328 return true;
abc3c744 2329
c1e44756 2330 return false;
abc3c744
SL
2331}
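/*
 * Worked example (illustrative): with cfq_slice_idle = 8ms and four
 * requests already dispatched, the estimate charges 4 * 8 = 32ms of
 * potential idling; if less than that remains before slice_end, the
 * slice is treated as effectively used already.
 */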
2332
0b182d61 2333static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2f5cb738 2334{
2f5cb738 2335 unsigned int max_dispatch;
22e2c507 2336
5ad531db
JA
2337 /*
2338 * Drain async requests before we start sync IO
2339 */
53c583d2 2340 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
0b182d61 2341 return false;
5ad531db 2342
2f5cb738
JA
2343 /*
2344 * If this is an async queue and we have sync IO in flight, let it wait
2345 */
53c583d2 2346 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
0b182d61 2347 return false;
2f5cb738 2348
abc3c744 2349 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2f5cb738
JA
2350 if (cfq_class_idle(cfqq))
2351 max_dispatch = 1;
b4878f24 2352
2f5cb738
JA
2353 /*
2354 * Does this cfqq already have too much IO in flight?
2355 */
2356 if (cfqq->dispatched >= max_dispatch) {
ef8a41df 2357 bool promote_sync = false;
2f5cb738
JA
2358 /*
2359 * idle queue must always only have a single IO in flight
2360 */
3ed9a296 2361 if (cfq_class_idle(cfqq))
0b182d61 2362 return false;
3ed9a296 2363
ef8a41df 2364 /*
c4ade94f
LS
2365 * If there is only one sync queue
2366 * we can ignore async queue here and give the sync
ef8a41df
SL
2367 * queue no dispatch limit. The reason is that a sync queue can
2368 * preempt an async queue, so limiting the sync queue doesn't make
2369 * sense. This is useful for the aiostress test.
2370 */
c4ade94f
LS
2371 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2372 promote_sync = true;
ef8a41df 2373
2f5cb738
JA
2374 /*
2375 * We have other queues, don't allow more IO from this one
2376 */
ef8a41df
SL
2377 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2378 !promote_sync)
0b182d61 2379 return false;
9ede209e 2380
365722bb 2381 /*
474b18cc 2382 * Sole queue user, no limit
365722bb 2383 */
ef8a41df 2384 if (cfqd->busy_queues == 1 || promote_sync)
abc3c744
SL
2385 max_dispatch = -1;
2386 else
2387 /*
2388 * Normally we start throttling cfqq when cfq_quantum/2
2389 * requests have been dispatched. But we can drive
2390 * deeper queue depths at the beginning of slice
2391 * subjected to upper limit of cfq_quantum.
2392 * */
2393 max_dispatch = cfqd->cfq_quantum;
8e296755
JA
2394 }
2395
2396 /*
2397 * Async queues must wait a bit before being allowed dispatch.
2398 * We also ramp up the dispatch depth gradually for async IO,
2399 * based on the last sync IO we serviced
2400 */
963b72fc 2401 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
573412b2 2402 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
8e296755 2403 unsigned int depth;
365722bb 2404
61f0c1dc 2405 depth = last_sync / cfqd->cfq_slice[1];
e00c54c3
JA
2406 if (!depth && !cfqq->dispatched)
2407 depth = 1;
8e296755
JA
2408 if (depth < max_dispatch)
2409 max_dispatch = depth;
2f5cb738 2410 }
3ed9a296 2411
0b182d61
JA
2412 /*
2413 * If we're below the current max, allow a dispatch
2414 */
2415 return cfqq->dispatched < max_dispatch;
2416}
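/*
 * Worked example (illustrative, default cfq_quantum = 8): the base
 * limit is max(8 / 2, 1) = 4 requests. An async queue whose last sync
 * completion was 200ms ago ramps to depth 200 / cfq_slice[1] =
 * 200 / 100 = 2 and is capped at min(4, 2) = 2, while a sole busy
 * queue is effectively unlimited.
 */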
2417
2418/*
2419 * Dispatch a request from cfqq, moving it to the request queue
2420 * dispatch list.
2421 */
2422static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2423{
2424 struct request *rq;
2425
2426 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2427
2428 if (!cfq_may_dispatch(cfqd, cfqq))
2429 return false;
2430
2431 /*
2432 * follow expired path, else get first next available
2433 */
2434 rq = cfq_check_fifo(cfqq);
2435 if (!rq)
2436 rq = cfqq->next_rq;
2437
2438 /*
2439 * insert request into driver dispatch list
2440 */
2441 cfq_dispatch_insert(cfqd->queue, rq);
2442
2443 if (!cfqd->active_cic) {
c5869807 2444 struct cfq_io_cq *cic = RQ_CIC(rq);
0b182d61 2445
c5869807 2446 atomic_long_inc(&cic->icq.ioc->refcount);
0b182d61
JA
2447 cfqd->active_cic = cic;
2448 }
2449
2450 return true;
2451}
2452
2453/*
2454 * Find the cfqq that we need to service and move a request from that to the
2455 * dispatch list
2456 */
2457static int cfq_dispatch_requests(struct request_queue *q, int force)
2458{
2459 struct cfq_data *cfqd = q->elevator->elevator_data;
2460 struct cfq_queue *cfqq;
2461
2462 if (!cfqd->busy_queues)
2463 return 0;
2464
2465 if (unlikely(force))
2466 return cfq_forced_dispatch(cfqd);
2467
2468 cfqq = cfq_select_queue(cfqd);
2469 if (!cfqq)
8e296755
JA
2470 return 0;
2471
2f5cb738 2472 /*
0b182d61 2473 * Dispatch a request from this cfqq, if it is allowed
2f5cb738 2474 */
0b182d61
JA
2475 if (!cfq_dispatch_request(cfqd, cfqq))
2476 return 0;
2477
2f5cb738 2478 cfqq->slice_dispatch++;
b029195d 2479 cfq_clear_cfqq_must_dispatch(cfqq);
22e2c507 2480
2f5cb738
JA
2481 /*
2482 * expire an async queue immediately if it has used up its slice. idle
2483 * queue always expire after 1 dispatch round.
2484 */
2485 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2486 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2487 cfq_class_idle(cfqq))) {
2488 cfqq->slice_end = jiffies + 1;
e5ff082e 2489 cfq_slice_expired(cfqd, 0);
1da177e4
LT
2490 }
2491
b217a903 2492 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2f5cb738 2493 return 1;
1da177e4
LT
2494}
2495
1da177e4 2496/*
5e705374
JA
2497 * task holds one reference to the queue, dropped when task exits. each rq
2498 * in-flight on this queue also holds a reference, dropped when rq is freed.
1da177e4 2499 *
b1c35769 2500 * Each cfq queue took a reference on the parent group. Drop it now.
1da177e4
LT
2501 * queue lock must be held here.
2502 */
2503static void cfq_put_queue(struct cfq_queue *cfqq)
2504{
22e2c507 2505 struct cfq_data *cfqd = cfqq->cfqd;
0bbfeb83 2506 struct cfq_group *cfqg;
22e2c507 2507
30d7b944 2508 BUG_ON(cfqq->ref <= 0);
1da177e4 2509
30d7b944
SL
2510 cfqq->ref--;
2511 if (cfqq->ref)
1da177e4
LT
2512 return;
2513
7b679138 2514 cfq_log_cfqq(cfqd, cfqq, "put_queue");
1da177e4 2515 BUG_ON(rb_first(&cfqq->sort_list));
22e2c507 2516 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
b1c35769 2517 cfqg = cfqq->cfqg;
1da177e4 2518
28f95cbc 2519 if (unlikely(cfqd->active_queue == cfqq)) {
e5ff082e 2520 __cfq_slice_expired(cfqd, cfqq, 0);
23e018a1 2521 cfq_schedule_dispatch(cfqd);
28f95cbc 2522 }
22e2c507 2523
f04a6424 2524 BUG_ON(cfq_cfqq_on_rr(cfqq));
1da177e4 2525 kmem_cache_free(cfq_pool, cfqq);
eb7d8c07 2526 cfqg_put(cfqg);
1da177e4
LT
2527}
2528
d02a2c07 2529static void cfq_put_cooperator(struct cfq_queue *cfqq)
1da177e4 2530{
df5fe3e8
JM
2531 struct cfq_queue *__cfqq, *next;
2532
df5fe3e8
JM
2533 /*
2534 * If this queue was scheduled to merge with another queue, be
2535 * sure to drop the reference taken on that queue (and others in
2536 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
2537 */
2538 __cfqq = cfqq->new_cfqq;
2539 while (__cfqq) {
2540 if (__cfqq == cfqq) {
2541 WARN(1, "cfqq->new_cfqq loop detected\n");
2542 break;
2543 }
2544 next = __cfqq->new_cfqq;
2545 cfq_put_queue(__cfqq);
2546 __cfqq = next;
2547 }
d02a2c07
SL
2548}
2549
2550static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2551{
2552 if (unlikely(cfqq == cfqd->active_queue)) {
2553 __cfq_slice_expired(cfqd, cfqq, 0);
2554 cfq_schedule_dispatch(cfqd);
2555 }
2556
2557 cfq_put_cooperator(cfqq);
df5fe3e8 2558
89850f7e
JA
2559 cfq_put_queue(cfqq);
2560}
22e2c507 2561
9b84cacd
TH
2562static void cfq_init_icq(struct io_cq *icq)
2563{
2564 struct cfq_io_cq *cic = icq_to_cic(icq);
2565
2566 cic->ttime.last_end_request = jiffies;
2567}
2568
c5869807 2569static void cfq_exit_icq(struct io_cq *icq)
89850f7e 2570{
c5869807 2571 struct cfq_io_cq *cic = icq_to_cic(icq);
283287a5 2572 struct cfq_data *cfqd = cic_to_cfqd(cic);
4faa3c81 2573
ff6657c6
JA
2574 if (cic->cfqq[BLK_RW_ASYNC]) {
2575 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2576 cic->cfqq[BLK_RW_ASYNC] = NULL;
12a05732
AV
2577 }
2578
ff6657c6
JA
2579 if (cic->cfqq[BLK_RW_SYNC]) {
2580 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2581 cic->cfqq[BLK_RW_SYNC] = NULL;
12a05732 2582 }
89850f7e
JA
2583}
2584
abede6da 2585static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
22e2c507
JA
2586{
2587 struct task_struct *tsk = current;
2588 int ioprio_class;
2589
3b18152c 2590 if (!cfq_cfqq_prio_changed(cfqq))
22e2c507
JA
2591 return;
2592
598971bf 2593 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
22e2c507 2594 switch (ioprio_class) {
fe094d98
JA
2595 default:
2596 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2597 case IOPRIO_CLASS_NONE:
2598 /*
6d63c275 2599 * no prio set, inherit CPU scheduling settings
fe094d98
JA
2600 */
2601 cfqq->ioprio = task_nice_ioprio(tsk);
6d63c275 2602 cfqq->ioprio_class = task_nice_ioclass(tsk);
fe094d98
JA
2603 break;
2604 case IOPRIO_CLASS_RT:
598971bf 2605 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
2606 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2607 break;
2608 case IOPRIO_CLASS_BE:
598971bf 2609 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
2610 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2611 break;
2612 case IOPRIO_CLASS_IDLE:
2613 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2614 cfqq->ioprio = 7;
2615 cfq_clear_cfqq_idle_window(cfqq);
2616 break;
22e2c507
JA
2617 }
2618
2619 /*
2620 * keep track of original prio settings in case we have to temporarily
2621 * elevate the priority of this queue
2622 */
2623 cfqq->org_ioprio = cfqq->ioprio;
3b18152c 2624 cfq_clear_cfqq_prio_changed(cfqq);
22e2c507
JA
2625}
2626
598971bf 2627static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
22e2c507 2628{
598971bf 2629 int ioprio = cic->icq.ioc->ioprio;
bca4b914 2630 struct cfq_data *cfqd = cic_to_cfqd(cic);
478a82b0 2631 struct cfq_queue *cfqq;
35e6077c 2632
598971bf
TH
2633 /*
2634 * Check whether ioprio has changed. The condition may trigger
2635 * spuriously on a newly created cic but there's no harm.
2636 */
2637 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
caaa5f9f
JA
2638 return;
2639
ff6657c6 2640 cfqq = cic->cfqq[BLK_RW_ASYNC];
caaa5f9f
JA
2641 if (cfqq) {
2642 struct cfq_queue *new_cfqq;
abede6da
TH
2643 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
2644 GFP_ATOMIC);
caaa5f9f 2645 if (new_cfqq) {
ff6657c6 2646 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
caaa5f9f
JA
2647 cfq_put_queue(cfqq);
2648 }
22e2c507 2649 }
caaa5f9f 2650
ff6657c6 2651 cfqq = cic->cfqq[BLK_RW_SYNC];
caaa5f9f
JA
2652 if (cfqq)
2653 cfq_mark_cfqq_prio_changed(cfqq);
598971bf
TH
2654
2655 cic->ioprio = ioprio;
22e2c507
JA
2656}
2657
d5036d77 2658static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 2659 pid_t pid, bool is_sync)
d5036d77
JA
2660{
2661 RB_CLEAR_NODE(&cfqq->rb_node);
2662 RB_CLEAR_NODE(&cfqq->p_node);
2663 INIT_LIST_HEAD(&cfqq->fifo);
2664
30d7b944 2665 cfqq->ref = 0;
d5036d77
JA
2666 cfqq->cfqd = cfqd;
2667
2668 cfq_mark_cfqq_prio_changed(cfqq);
2669
2670 if (is_sync) {
2671 if (!cfq_class_idle(cfqq))
2672 cfq_mark_cfqq_idle_window(cfqq);
2673 cfq_mark_cfqq_sync(cfqq);
2674 }
2675 cfqq->pid = pid;
2676}
2677
24610333 2678#ifdef CONFIG_CFQ_GROUP_IOSCHED
598971bf 2679static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
24610333 2680{
bca4b914 2681 struct cfq_data *cfqd = cic_to_cfqd(cic);
598971bf
TH
2682 struct cfq_queue *sync_cfqq;
2683 uint64_t id;
24610333 2684
598971bf
TH
2685 rcu_read_lock();
2686 id = bio_blkio_cgroup(bio)->id;
2687 rcu_read_unlock();
24610333 2688
598971bf
TH
2689 /*
2690 * Check whether blkcg has changed. The condition may trigger
2691 * spuriously on a newly created cic but there's no harm.
2692 */
2693 if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
2694 return;
24610333 2695
598971bf 2696 sync_cfqq = cic_to_cfqq(cic, 1);
24610333
VG
2697 if (sync_cfqq) {
2698 /*
2699 * Drop reference to sync queue. A new sync queue will be
2700 * assigned in new group upon arrival of a fresh request.
2701 */
2702 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2703 cic_set_cfqq(cic, NULL, 1);
2704 cfq_put_queue(sync_cfqq);
2705 }
598971bf
TH
2706
2707 cic->blkcg_id = id;
24610333 2708}
598971bf
TH
2709#else
2710static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
24610333
VG
2711#endif /* CONFIG_CFQ_GROUP_IOSCHED */
2712
22e2c507 2713static struct cfq_queue *
abede6da
TH
2714cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
2715 struct bio *bio, gfp_t gfp_mask)
22e2c507 2716{
0a5a7d0e 2717 struct blkio_cgroup *blkcg;
22e2c507 2718 struct cfq_queue *cfqq, *new_cfqq = NULL;
cdb16e8f 2719 struct cfq_group *cfqg;
22e2c507
JA
2720
2721retry:
2a7f1244
TH
2722 rcu_read_lock();
2723
4f85cb96 2724 blkcg = bio_blkio_cgroup(bio);
cd1604fa 2725 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
91fac317 2726 cfqq = cic_to_cfqq(cic, is_sync);
22e2c507 2727
6118b70b
JA
2728 /*
2729 * Always try a new alloc if we fell back to the OOM cfqq
2730 * originally, since it should just be a temporary situation.
2731 */
2732 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2733 cfqq = NULL;
22e2c507
JA
2734 if (new_cfqq) {
2735 cfqq = new_cfqq;
2736 new_cfqq = NULL;
2737 } else if (gfp_mask & __GFP_WAIT) {
2a7f1244 2738 rcu_read_unlock();
22e2c507 2739 spin_unlock_irq(cfqd->queue->queue_lock);
94f6030c 2740 new_cfqq = kmem_cache_alloc_node(cfq_pool,
6118b70b 2741 gfp_mask | __GFP_ZERO,
94f6030c 2742 cfqd->queue->node);
22e2c507 2743 spin_lock_irq(cfqd->queue->queue_lock);
6118b70b
JA
2744 if (new_cfqq)
2745 goto retry;
22e2c507 2746 } else {
94f6030c
CL
2747 cfqq = kmem_cache_alloc_node(cfq_pool,
2748 gfp_mask | __GFP_ZERO,
2749 cfqd->queue->node);
22e2c507
JA
2750 }
2751
6118b70b
JA
2752 if (cfqq) {
2753 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
abede6da 2754 cfq_init_prio_data(cfqq, cic);
cdb16e8f 2755 cfq_link_cfqq_cfqg(cfqq, cfqg);
6118b70b
JA
2756 cfq_log_cfqq(cfqd, cfqq, "alloced");
2757 } else
2758 cfqq = &cfqd->oom_cfqq;
22e2c507
JA
2759 }
2760
2761 if (new_cfqq)
2762 kmem_cache_free(cfq_pool, new_cfqq);
2763
2a7f1244 2764 rcu_read_unlock();
22e2c507
JA
2765 return cfqq;
2766}
2767
c2dea2d1
VT
2768static struct cfq_queue **
2769cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2770{
fe094d98 2771 switch (ioprio_class) {
c2dea2d1
VT
2772 case IOPRIO_CLASS_RT:
2773 return &cfqd->async_cfqq[0][ioprio];
598971bf
TH
2774 case IOPRIO_CLASS_NONE:
2775 ioprio = IOPRIO_NORM;
2776 /* fall through */
c2dea2d1
VT
2777 case IOPRIO_CLASS_BE:
2778 return &cfqd->async_cfqq[1][ioprio];
2779 case IOPRIO_CLASS_IDLE:
2780 return &cfqd->async_idle_cfqq;
2781 default:
2782 BUG();
2783 }
2784}
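/*
 * Illustrative mapping: an RT ioprio-2 task shares async_cfqq[0][2],
 * a task with no ioprio set is normalized to BE at IOPRIO_NORM and
 * shares async_cfqq[1][4], and every IDLE-class task shares the single
 * async_idle_cfqq. Async queues are therefore per-priority, not
 * per-process.
 */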
2785
15c31be4 2786static struct cfq_queue *
abede6da 2787cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
4f85cb96 2788 struct bio *bio, gfp_t gfp_mask)
15c31be4 2789{
598971bf
TH
2790 const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
2791 const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
c2dea2d1 2792 struct cfq_queue **async_cfqq = NULL;
15c31be4
JA
2793 struct cfq_queue *cfqq = NULL;
2794
c2dea2d1
VT
2795 if (!is_sync) {
2796 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2797 cfqq = *async_cfqq;
2798 }
2799
6118b70b 2800 if (!cfqq)
abede6da 2801 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
15c31be4
JA
2802
2803 /*
2804 * pin the queue now that it's allocated, scheduler exit will prune it
2805 */
c2dea2d1 2806 if (!is_sync && !(*async_cfqq)) {
30d7b944 2807 cfqq->ref++;
c2dea2d1 2808 *async_cfqq = cfqq;
15c31be4
JA
2809 }
2810
30d7b944 2811 cfqq->ref++;
15c31be4
JA
2812 return cfqq;
2813}
2814
22e2c507 2815static void
383cd721 2816__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
1da177e4 2817{
383cd721
SL
2818 unsigned long elapsed = jiffies - ttime->last_end_request;
2819 elapsed = min(elapsed, 2UL * slice_idle);
db3b5848 2820
383cd721
SL
2821 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
2822 ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
2823 ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
2824}
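/*
 * Worked example (illustrative): both counters are fixed-point EWMAs
 * with a 7/8 decay, each sample adding 256/8 = 32 to ttime_samples.
 * With a steady think time of 4 jiffies per sample, ttime_samples
 * converges to 256, ttime_total to 256 * 4 = 1024, and ttime_mean to
 * (1024 + 128) / 256 = 4. Clamping elapsed to 2 * slice_idle keeps one
 * long pause from skewing the mean.
 */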
2825
2826static void
2827cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 2828 struct cfq_io_cq *cic)
383cd721 2829{
f5f2b6ce 2830 if (cfq_cfqq_sync(cfqq)) {
383cd721 2831 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
f5f2b6ce
SL
2832 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
2833 cfqd->cfq_slice_idle);
2834 }
7700fc4f
SL
2835#ifdef CONFIG_CFQ_GROUP_IOSCHED
2836 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
2837#endif
22e2c507 2838}
1da177e4 2839
206dc69b 2840static void
b2c18e1e 2841cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
6d048f53 2842 struct request *rq)
206dc69b 2843{
3dde36dd 2844 sector_t sdist = 0;
41647e7a 2845 sector_t n_sec = blk_rq_sectors(rq);
3dde36dd
CZ
2846 if (cfqq->last_request_pos) {
2847 if (cfqq->last_request_pos < blk_rq_pos(rq))
2848 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2849 else
2850 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2851 }
206dc69b 2852
3dde36dd 2853 cfqq->seek_history <<= 1;
41647e7a
CZ
2854 if (blk_queue_nonrot(cfqd->queue))
2855 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
2856 else
2857 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
206dc69b 2858}
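/*
 * Illustrative note: seek_history is a 32-bit shift register, one bit
 * per incoming request. CFQQ_SEEKY() fires once more than 32/8 = 4 of
 * the last 32 requests were "seeky": farther than CFQQ_SEEK_THR
 * (8 * 100 sectors) from the previous one on rotational storage, or
 * smaller than CFQQ_SECT_THR_NONROT (2 * 32 sectors) on non-rotational
 * devices, where request size matters more than seek distance.
 */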
1da177e4 2859
22e2c507
JA
2860/*
2861 * Disable idle window if the process thinks too long or seeks so much that
2862 * it doesn't matter
2863 */
2864static void
2865cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 2866 struct cfq_io_cq *cic)
22e2c507 2867{
7b679138 2868 int old_idle, enable_idle;
1be92f2f 2869
0871714e
JA
2870 /*
2871 * Don't idle for async or idle io prio class
2872 */
2873 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1be92f2f
JA
2874 return;
2875
c265a7f4 2876 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1da177e4 2877
76280aff
CZ
2878 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
2879 cfq_mark_cfqq_deep(cfqq);
2880
749ef9f8
CZ
2881 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
2882 enable_idle = 0;
f6e8d01b 2883 else if (!atomic_read(&cic->icq.ioc->active_ref) ||
c5869807
TH
2884 !cfqd->cfq_slice_idle ||
2885 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
22e2c507 2886 enable_idle = 0;
383cd721
SL
2887 else if (sample_valid(cic->ttime.ttime_samples)) {
2888 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
22e2c507
JA
2889 enable_idle = 0;
2890 else
2891 enable_idle = 1;
1da177e4
LT
2892 }
2893
7b679138
JA
2894 if (old_idle != enable_idle) {
2895 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
2896 if (enable_idle)
2897 cfq_mark_cfqq_idle_window(cfqq);
2898 else
2899 cfq_clear_cfqq_idle_window(cfqq);
2900 }
22e2c507 2901}
1da177e4 2902
22e2c507
JA
2903/*
2904 * Check if new_cfqq should preempt the currently active queue. Return false
2905 * for no or if we aren't sure; true will cause a preempt.
2906 */
a6151c3a 2907static bool
22e2c507 2908cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
5e705374 2909 struct request *rq)
22e2c507 2910{
6d048f53 2911 struct cfq_queue *cfqq;
22e2c507 2912
6d048f53
JA
2913 cfqq = cfqd->active_queue;
2914 if (!cfqq)
a6151c3a 2915 return false;
22e2c507 2916
6d048f53 2917 if (cfq_class_idle(new_cfqq))
a6151c3a 2918 return false;
22e2c507
JA
2919
2920 if (cfq_class_idle(cfqq))
a6151c3a 2921 return true;
1e3335de 2922
875feb63
DS
2923 /*
2924 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
2925 */
2926 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
2927 return false;
2928
374f84ac
JA
2929 /*
2930 * if the new request is sync, but the currently running queue is
2931 * not, let the sync request have priority.
2932 */
5e705374 2933 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
a6151c3a 2934 return true;
1e3335de 2935
8682e1f1
VG
2936 if (new_cfqq->cfqg != cfqq->cfqg)
2937 return false;
2938
2939 if (cfq_slice_used(cfqq))
2940 return true;
2941
2942 /* Allow preemption only if we are idling on sync-noidle tree */
2943 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
2944 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
2945 new_cfqq->service_tree->count == 2 &&
2946 RB_EMPTY_ROOT(&cfqq->sort_list))
2947 return true;
2948
b53d1ed7
JA
2949 /*
2950 * So both queues are sync. Let the new request get disk time if
2951 * it's a metadata request and the current queue is doing regular IO.
2952 */
65299a3b 2953 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
b53d1ed7
JA
2954 return true;
2955
3a9a3f6c
DS
2956 /*
2957 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
2958 */
2959 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
a6151c3a 2960 return true;
3a9a3f6c 2961
d2d59e18
SL
2962 /* An idle queue should not be idle now for some reason */
2963 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
2964 return true;
2965
1e3335de 2966 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
a6151c3a 2967 return false;
1e3335de
JA
2968
2969 /*
2970 * if this request is as good as one we would expect from the
2971 * current cfqq, let it preempt
2972 */
e9ce335d 2973 if (cfq_rq_close(cfqd, cfqq, rq))
a6151c3a 2974 return true;
1e3335de 2975
a6151c3a 2976 return false;
22e2c507
JA
2977}
2978
2979/*
2980 * cfqq preempts the active queue. if we allowed preempt with no slice left,
2981 * let it have half of its nominal slice.
2982 */
2983static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2984{
df0793ab
SL
2985 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
2986
7b679138 2987 cfq_log_cfqq(cfqd, cfqq, "preempt");
df0793ab 2988 cfq_slice_expired(cfqd, 1);
22e2c507 2989
f8ae6e3e
SL
2990 /*
2991 * workload type is changed, don't save slice, otherwise preempt
2992 * doesn't happen
2993 */
df0793ab 2994 if (old_type != cfqq_type(cfqq))
f8ae6e3e
SL
2995 cfqq->cfqg->saved_workload_slice = 0;
2996
bf572256
JA
2997 /*
2998 * Put the new queue at the front of the current list,
2999 * so we know that it will be selected next.
3000 */
3001 BUG_ON(!cfq_cfqq_on_rr(cfqq));
edd75ffd
JA
3002
3003 cfq_service_tree_add(cfqd, cfqq, 1);
eda5e0c9 3004
62a37f6b
JT
3005 cfqq->slice_end = 0;
3006 cfq_mark_cfqq_slice_new(cfqq);
22e2c507
JA
3007}
3008
22e2c507 3009/*
5e705374 3010 * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507
JA
3011 * something we should do about it
3012 */
3013static void
5e705374
JA
3014cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3015 struct request *rq)
22e2c507 3016{
c5869807 3017 struct cfq_io_cq *cic = RQ_CIC(rq);
12e9fddd 3018
45333d5a 3019 cfqd->rq_queued++;
65299a3b
CH
3020 if (rq->cmd_flags & REQ_PRIO)
3021 cfqq->prio_pending++;
374f84ac 3022
383cd721 3023 cfq_update_io_thinktime(cfqd, cfqq, cic);
b2c18e1e 3024 cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a1
JA
3025 cfq_update_idle_window(cfqd, cfqq, cic);
3026
b2c18e1e 3027 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507
JA
3028
3029 if (cfqq == cfqd->active_queue) {
3030 /*
b029195d
JA
3031 * Remember that we saw a request from this process, but
3032 * don't start queuing just yet. Otherwise we risk seeing lots
3033 * of tiny requests, because we disrupt the normal plugging
d6ceb25e
JA
3034 * and merging. If the request is already larger than a single
3035 * page, let it rip immediately. For that case we assume that
2d870722
JA
3036 * merging is already done. Ditto for a busy system that
3037 * has other work pending, don't risk delaying until the
3038 * idle timer unplug to continue working.
22e2c507 3039 */
d6ceb25e 3040 if (cfq_cfqq_wait_request(cfqq)) {
2d870722
JA
3041 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3042 cfqd->busy_queues > 1) {
812df48d 3043 cfq_del_timer(cfqd, cfqq);
554554f6 3044 cfq_clear_cfqq_wait_request(cfqq);
24ecfbe2 3045 __blk_run_queue(cfqd->queue);
a11cdaa7 3046 } else {
e98ef89b 3047 cfq_blkiocg_update_idle_time_stats(
c1768268
TH
3048 cfqg_to_blkg(cfqq->cfqg),
3049 &blkio_policy_cfq);
bf791937 3050 cfq_mark_cfqq_must_dispatch(cfqq);
a11cdaa7 3051 }
d6ceb25e 3052 }
5e705374 3053 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507
JA
3054 /*
3055 * not the active queue - expire current slice if it is
3056 * idle and has expired its mean thinktime, or this new queue
3a9a3f6c
DS
3057 * has some old slice time left and is of higher priority or
3058 * this new queue is RT and the current one is BE
22e2c507
JA
3059 */
3060 cfq_preempt_queue(cfqd, cfqq);
24ecfbe2 3061 __blk_run_queue(cfqd->queue);
22e2c507 3062 }
1da177e4
LT
3063}
3064
165125e1 3065static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4 3066{
b4878f24 3067 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 3068 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 3069
7b679138 3070 cfq_log_cfqq(cfqd, cfqq, "insert_request");
abede6da 3071 cfq_init_prio_data(cfqq, RQ_CIC(rq));
1da177e4 3072
30996f40 3073 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
22e2c507 3074 list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3d 3075 cfq_add_rq_rb(rq);
0381411e 3076 cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
c1768268 3077 &blkio_policy_cfq,
0381411e
TH
3078 cfqg_to_blkg(cfqd->serving_group),
3079 rq_data_dir(rq), rq_is_sync(rq));
5e705374 3080 cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4
LT
3081}
3082
45333d5a
AC
3083/*
3084 * Update hw_tag based on peak queue depth over 50 samples under
3085 * sufficient load.
3086 */
3087static void cfq_update_hw_tag(struct cfq_data *cfqd)
3088{
1a1238a7
SL
3089 struct cfq_queue *cfqq = cfqd->active_queue;
3090
53c583d2
CZ
3091 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3092 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
e459dd08
CZ
3093
3094 if (cfqd->hw_tag == 1)
3095 return;
45333d5a
AC
3096
3097 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
53c583d2 3098 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
45333d5a
AC
3099 return;
3100
1a1238a7
SL
3101 /*
3102 * If active queue hasn't enough requests and can idle, cfq might not
3103 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3104 * case
3105 */
3106 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3107 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
53c583d2 3108 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
1a1238a7
SL
3109 return;
3110
45333d5a
AC
3111 if (cfqd->hw_tag_samples++ < 50)
3112 return;
3113
e459dd08 3114 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
45333d5a
AC
3115 cfqd->hw_tag = 1;
3116 else
3117 cfqd->hw_tag = 0;
45333d5a
AC
3118}
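/*
 * Illustrative summary: hw_tag is inferred rather than probed. The
 * peak rq_in_driver depth is sampled 50 times under sufficient load;
 * if the estimated depth reaches CFQ_HW_QUEUE_MIN (5), the device is
 * assumed to queue internally (hw_tag = 1), which among other things
 * disables idling on non-rotational devices in cfq_arm_slice_timer().
 */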
3119
7667aa06
VG
3120static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3121{
c5869807 3122 struct cfq_io_cq *cic = cfqd->active_cic;
7667aa06 3123
02a8f01b
JT
3124 /* If the queue already has requests, don't wait */
3125 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3126 return false;
3127
7667aa06
VG
3128 /* If there are other queues in the group, don't wait */
3129 if (cfqq->cfqg->nr_cfqq > 1)
3130 return false;
3131
7700fc4f
SL
3132 /* the only queue in the group, but think time is big */
3133 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3134 return false;
3135
7667aa06
VG
3136 if (cfq_slice_used(cfqq))
3137 return true;
3138
3139 /* if slice left is less than think time, wait busy */
383cd721
SL
3140 if (cic && sample_valid(cic->ttime.ttime_samples)
3141 && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
7667aa06
VG
3142 return true;
3143
3144 /*
3145 * If the think time is less than a jiffy, then ttime_mean=0 and the
3146 * above will not be true. It might happen that the slice has not
3147 * expired yet but will expire soon (4-5 ns) during select_queue(). To cover the
3148 * case where think time is less than a jiffy, mark the queue wait
3149 * busy if only 1 jiffy is left in the slice.
3150 */
3151 if (cfqq->slice_end - jiffies == 1)
3152 return true;
3153
3154 return false;
3155}
3156
165125e1 3157static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4 3158{
5e705374 3159 struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f24 3160 struct cfq_data *cfqd = cfqq->cfqd;
5380a101 3161 const int sync = rq_is_sync(rq);
b4878f24 3162 unsigned long now;
1da177e4 3163
b4878f24 3164 now = jiffies;
33659ebb
CH
3165 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3166 !!(rq->cmd_flags & REQ_NOIDLE));
1da177e4 3167
45333d5a
AC
3168 cfq_update_hw_tag(cfqd);
3169
53c583d2 3170 WARN_ON(!cfqd->rq_in_driver);
6d048f53 3171 WARN_ON(!cfqq->dispatched);
53c583d2 3172 cfqd->rq_in_driver--;
6d048f53 3173 cfqq->dispatched--;
80bdf0c7 3174 (RQ_CFQG(rq))->dispatched--;
0381411e 3175 cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
c1768268
TH
3176 &blkio_policy_cfq, rq_start_time_ns(rq),
3177 rq_io_start_time_ns(rq), rq_data_dir(rq),
3178 rq_is_sync(rq));
1da177e4 3179
53c583d2 3180 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3ed9a296 3181
365722bb 3182 if (sync) {
f5f2b6ce
SL
3183 struct cfq_rb_root *service_tree;
3184
383cd721 3185 RQ_CIC(rq)->ttime.last_end_request = now;
f5f2b6ce
SL
3186
3187 if (cfq_cfqq_on_rr(cfqq))
3188 service_tree = cfqq->service_tree;
3189 else
3190 service_tree = service_tree_for(cfqq->cfqg,
3191 cfqq_prio(cfqq), cfqq_type(cfqq));
3192 service_tree->ttime.last_end_request = now;
573412b2
CZ
3193 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3194 cfqd->last_delayed_sync = now;
365722bb 3195 }
caaa5f9f 3196
7700fc4f
SL
3197#ifdef CONFIG_CFQ_GROUP_IOSCHED
3198 cfqq->cfqg->ttime.last_end_request = now;
3199#endif
3200
caaa5f9f
JA
3201 /*
3202 * If this is the active queue, check if it needs to be expired,
3203 * or if we want to idle in case it has no pending requests.
3204 */
3205 if (cfqd->active_queue == cfqq) {
a36e71f9
JA
3206 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3207
44f7c160
JA
3208 if (cfq_cfqq_slice_new(cfqq)) {
3209 cfq_set_prio_slice(cfqd, cfqq);
3210 cfq_clear_cfqq_slice_new(cfqq);
3211 }
f75edf2d
VG
3212
3213 /*
7667aa06
VG
3214 * Should we wait for the next request to come in before we expire
3215 * the queue?
f75edf2d 3216 */
7667aa06 3217 if (cfq_should_wait_busy(cfqd, cfqq)) {
80bdf0c7
VG
3218 unsigned long extend_sl = cfqd->cfq_slice_idle;
3219 if (!cfqd->cfq_slice_idle)
3220 extend_sl = cfqd->cfq_group_idle;
3221 cfqq->slice_end = jiffies + extend_sl;
f75edf2d 3222 cfq_mark_cfqq_wait_busy(cfqq);
b1ffe737 3223 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
f75edf2d
VG
3224 }
3225
a36e71f9 3226 /*
8e550632
CZ
3227 * Idling is not enabled on:
3228 * - expired queues
3229 * - idle-priority queues
3230 * - async queues
3231 * - queues with still some requests queued
3232 * - when there is a close cooperator
a36e71f9 3233 */
0871714e 3234 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
e5ff082e 3235 cfq_slice_expired(cfqd, 1);
8e550632
CZ
3236 else if (sync && cfqq_empty &&
3237 !cfq_close_cooperator(cfqd, cfqq)) {
749ef9f8 3238 cfq_arm_slice_timer(cfqd);
8e550632 3239 }
caaa5f9f 3240 }
6d048f53 3241
53c583d2 3242 if (!cfqd->rq_in_driver)
23e018a1 3243 cfq_schedule_dispatch(cfqd);
1da177e4
LT
3244}
3245
89850f7e 3246static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507 3247{
1b379d8d 3248 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c 3249 cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507 3250 return ELV_MQUEUE_MUST;
3b18152c 3251 }
1da177e4 3252
22e2c507 3253 return ELV_MQUEUE_MAY;
22e2c507
JA
3254}
3255
165125e1 3256static int cfq_may_queue(struct request_queue *q, int rw)
22e2c507
JA
3257{
3258 struct cfq_data *cfqd = q->elevator->elevator_data;
3259 struct task_struct *tsk = current;
c5869807 3260 struct cfq_io_cq *cic;
22e2c507
JA
3261 struct cfq_queue *cfqq;
3262
3263 /*
3264 * don't force setup of a queue from here, as a call to may_queue
3265 * does not necessarily imply that a request actually will be queued.
3266 * so just lookup a possibly existing queue, or return 'may queue'
3267 * if that fails
3268 */
4ac845a2 3269 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
3270 if (!cic)
3271 return ELV_MQUEUE_MAY;
3272
b0b78f81 3273 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
22e2c507 3274 if (cfqq) {
abede6da 3275 cfq_init_prio_data(cfqq, cic);
22e2c507 3276
89850f7e 3277 return __cfq_may_queue(cfqq);
22e2c507
JA
3278 }
3279
3280 return ELV_MQUEUE_MAY;
1da177e4
LT
3281}
3282
1da177e4
LT
3283/*
3284 * queue lock held here
3285 */
bb37b94c 3286static void cfq_put_request(struct request *rq)
1da177e4 3287{
5e705374 3288 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 3289
5e705374 3290 if (cfqq) {
22e2c507 3291 const int rw = rq_data_dir(rq);
1da177e4 3292
22e2c507
JA
3293 BUG_ON(!cfqq->allocated[rw]);
3294 cfqq->allocated[rw]--;
1da177e4 3295
7f1dc8a2 3296 /* Put down rq reference on cfqg */
eb7d8c07 3297 cfqg_put(RQ_CFQG(rq));
a612fddf
TH
3298 rq->elv.priv[0] = NULL;
3299 rq->elv.priv[1] = NULL;
7f1dc8a2 3300
1da177e4
LT
3301 cfq_put_queue(cfqq);
3302 }
3303}
3304
df5fe3e8 3305static struct cfq_queue *
c5869807 3306cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
df5fe3e8
JM
3307 struct cfq_queue *cfqq)
3308{
3309 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3310 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d040 3311 cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8
JM
3312 cfq_put_queue(cfqq);
3313 return cic_to_cfqq(cic, 1);
3314}
3315
e6c5bc73
JM
3316/*
3317 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3318 * was the last process referring to said cfqq.
3319 */
3320static struct cfq_queue *
c5869807 3321split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
e6c5bc73
JM
3322{
3323 if (cfqq_process_refs(cfqq) == 1) {
e6c5bc73
JM
3324 cfqq->pid = current->pid;
3325 cfq_clear_cfqq_coop(cfqq);
ae54abed 3326 cfq_clear_cfqq_split_coop(cfqq);
e6c5bc73
JM
3327 return cfqq;
3328 }
3329
3330 cic_set_cfqq(cic, NULL, 1);
d02a2c07
SL
3331
3332 cfq_put_cooperator(cfqq);
3333
e6c5bc73
JM
3334 cfq_put_queue(cfqq);
3335 return NULL;
3336}
1da177e4 3337/*
22e2c507 3338 * Allocate cfq data structures associated with this request.
1da177e4 3339 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
		gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irq(q->queue_lock);

	check_ioprio_changed(cic, bio);
	check_blkcg_changed(cic, bio);
new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue. The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}
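
	/*
	 * Account the request against the queue and pin both the cfqq and
	 * its cfq_group; the two references are dropped again in
	 * cfq_put_request().
	 */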
	cfqq->allocated[rw]++;

	cfqq->ref++;
	cfqg_get(cfqq->cfqg);
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfqq->cfqg;
	spin_unlock_irq(q->queue_lock);
	return 0;
}

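/*
 * Work handler used to restart the queue from process context whenever
 * someone (e.g. the idle slice timer) decides more dispatching is needed.
 */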
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke the request handler if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * The queue depth flag is only reset when idling didn't
		 * succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

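/*
 * Quiesce the idle slice timer and any pending unplug work so that
 * neither can fire while the elevator is being torn down.
 */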
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

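/*
 * Drop the references pinning the shared per-priority async queues and
 * the idle-class async queue.
 */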
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

#ifndef CONFIG_CFQ_GROUP_IOSCHED
	kfree(cfqd->root_group);
#endif
	update_root_blkg_pd(q, BLKIO_POLICY_PROP);
	kfree(cfqd);
}

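/*
 * Set up per-queue scheduler state: the root service tree and root
 * cfq_group, the shared oom_cfqq fallback, the idle slice timer, the
 * unplug work item and the default tunables.
 */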
static int cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	struct blkio_group *blkg __maybe_unused;
	int i;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return -ENOMEM;

	cfqd->queue = q;
	q->elevator->elevator_data = cfqd;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
	if (!IS_ERR(blkg))
		cfqd->root_group = blkg_to_cfqg(blkg);

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
#else
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, cfqd->queue->node);
	if (cfqd->root_group)
		cfq_init_cfqg_base(cfqd->root_group);
#endif
	if (!cfqd->root_group) {
		kfree(cfqd);
		return -ENOMEM;
	}

	cfqd->root_group->weight = 2 * BLKIO_WEIGHT_DEFAULT;

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it. oom_cfqq is linked to root_group
	 * but shouldn't hold a reference as it'll never be unlinked. Lose
	 * the reference from linking right away.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;

	spin_lock_irq(q->queue_lock);
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
	cfqg_put(cfqd->root_group);
	spin_unlock_irq(q->queue_lock);

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;
	/*
	 * we optimistically start assuming sync ops weren't delayed in the
	 * last second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return 0;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

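/*
 * SHOW_FUNCTION/STORE_FUNCTION stamp out one sysfs handler per tunable.
 * __CONV selects conversion between jiffies (the internal unit) and
 * milliseconds (what user space reads and writes); STORE_FUNCTION also
 * clamps the written value into [MIN, MAX] before storing it.
 */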
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	__ATTR_NULL
};

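/*
 * While CFQ is the active elevator, each attribute above appears as a
 * file under /sys/block/<dev>/queue/iosched/. For example ("sda" is just
 * a placeholder device name):
 *
 *	cat /sys/block/sda/queue/iosched/slice_idle
 *	echo 0 > /sys/block/sda/queue/iosched/slice_idle
 */
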
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
	},
	.icq_size	= sizeof(struct cfq_io_cq),
	.icq_align	= __alignof__(struct cfq_io_cq),
	.elevator_attrs	= cfq_attrs,
	.elevator_name	= "cfq",
	.elevator_owner	= THIS_MODULE,
};

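/*
 * Blkio policy hooks: blk-cgroup sizes the per-group policy data using
 * pdata_size (a struct cfq_group per blkio_group) and calls back into CFQ
 * to initialise each group and to propagate weight updates.
 */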
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkio_policy_type blkio_policy_cfq = {
	.ops = {
		.blkio_init_group_fn =		cfq_init_blkio_group,
		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
	},
	.plid = BLKIO_POLICY_PROP,
	.pdata_size = sizeof(struct cfq_group),
};
#endif

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;
#else
	cfq_group_idle = 0;
#endif
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret) {
		kmem_cache_destroy(cfq_pool);
		return ret;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkio_policy_register(&blkio_policy_cfq);
#endif
	return 0;
}

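/*
 * Tear down in the reverse order of cfq_init(): unregister the blkio
 * policy, then the elevator, then free the cfq_queue slab cache.
 */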
static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkio_policy_unregister(&blkio_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");