blkcg: don't use blkg->plid in stat related functions
[linux-2.6-block.git] / block / cfq-iosched.c
1da177e4 1/*
1da177e4
LT
2 * CFQ, or complete fairness queueing, disk scheduler.
3 *
4 * Based on ideas from a previously unfinished io
5 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6 *
0fe23479 7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
1da177e4 8 */
1da177e4 9#include <linux/module.h>
5a0e3ad6 10#include <linux/slab.h>
1cc9be68
AV
11#include <linux/blkdev.h>
12#include <linux/elevator.h>
ad5ebd2f 13#include <linux/jiffies.h>
1da177e4 14#include <linux/rbtree.h>
22e2c507 15#include <linux/ioprio.h>
7b679138 16#include <linux/blktrace_api.h>
6e736be7 17#include "blk.h"
e98ef89b 18#include "cfq.h"
1da177e4 19
0381411e
TH
20static struct blkio_policy_type blkio_policy_cfq;
21
1da177e4
LT
22/*
23 * tunables
24 */
fe094d98 25/* max queue in one round of service */
abc3c744 26static const int cfq_quantum = 8;
64100099 27static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
fe094d98
JA
28/* maximum backwards seek, in KiB */
29static const int cfq_back_max = 16 * 1024;
30/* penalty of a backwards seek */
31static const int cfq_back_penalty = 2;
64100099 32static const int cfq_slice_sync = HZ / 10;
3b18152c 33static int cfq_slice_async = HZ / 25;
64100099 34static const int cfq_slice_async_rq = 2;
caaa5f9f 35static int cfq_slice_idle = HZ / 125;
80bdf0c7 36static int cfq_group_idle = HZ / 125;
5db5d642
CZ
37static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
38static const int cfq_hist_divisor = 4;
22e2c507 39
d9e7620e 40/*
0871714e 41 * offset from end of service tree
d9e7620e 42 */
0871714e 43#define CFQ_IDLE_DELAY (HZ / 5)
d9e7620e
JA
44
45/*
46 * below this threshold, we consider thinktime immediate
47 */
48#define CFQ_MIN_TT (2)
49
22e2c507 50#define CFQ_SLICE_SCALE (5)
45333d5a 51#define CFQ_HW_QUEUE_MIN (5)
25bc6b07 52#define CFQ_SERVICE_SHIFT 12
22e2c507 53
3dde36dd 54#define CFQQ_SEEK_THR (sector_t)(8 * 100)
e9ce335d 55#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
41647e7a 56#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
3dde36dd 57#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
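/*
 * Worked example for CFQQ_SEEKY(): hweight32() counts the set bits in the
 * 32-bit seek_history window (one bit per recently sampled request), so a
 * queue is classified as seeky once more than 32/8 = 4 bits are set, i.e.
 * once more than roughly one in eight of its recent requests were seeks.
 */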
ae54abed 58
a612fddf
TH
59#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
60#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])
61#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])
1da177e4 62
e18b890b 63static struct kmem_cache *cfq_pool;
1da177e4 64
22e2c507
JA
65#define CFQ_PRIO_LISTS IOPRIO_BE_NR
66#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
22e2c507
JA
67#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
68
206dc69b 69#define sample_valid(samples) ((samples) > 80)
1fa8f6d6 70#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
206dc69b 71
c5869807
TH
72struct cfq_ttime {
73 unsigned long last_end_request;
74
75 unsigned long ttime_total;
76 unsigned long ttime_samples;
77 unsigned long ttime_mean;
78};
79
cc09e299
JA
80/*
81 * Most of our rbtree usage is for sorting with min extraction, so
82 * if we cache the leftmost node we don't have to walk down the tree
 83 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
84 * move this into the elevator for the rq sorting as well.
85 */
86struct cfq_rb_root {
87 struct rb_root rb;
88 struct rb_node *left;
aa6f6a3d 89 unsigned count;
73e9ffdd 90 unsigned total_weight;
1fa8f6d6 91 u64 min_vdisktime;
f5f2b6ce 92 struct cfq_ttime ttime;
cc09e299 93};
f5f2b6ce
SL
94#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \
95 .ttime = {.last_end_request = jiffies,},}
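/*
 * With the cached leftmost node, cfq_rb_first() below only has to fall back
 * to a full rb_first() walk when root->left is NULL; cfq_rb_erase() clears
 * the cache when the cached node itself is removed, and it is re-established
 * lazily on the next lookup.
 */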
cc09e299 96
6118b70b
JA
97/*
98 * Per process-grouping structure
99 */
100struct cfq_queue {
101 /* reference count */
30d7b944 102 int ref;
6118b70b
JA
103 /* various state flags, see below */
104 unsigned int flags;
105 /* parent cfq_data */
106 struct cfq_data *cfqd;
107 /* service_tree member */
108 struct rb_node rb_node;
109 /* service_tree key */
110 unsigned long rb_key;
111 /* prio tree member */
112 struct rb_node p_node;
113 /* prio tree root we belong to, if any */
114 struct rb_root *p_root;
115 /* sorted list of pending requests */
116 struct rb_root sort_list;
117 /* if fifo isn't expired, next request to serve */
118 struct request *next_rq;
119 /* requests queued in sort_list */
120 int queued[2];
121 /* currently allocated requests */
122 int allocated[2];
123 /* fifo list of requests in sort_list */
124 struct list_head fifo;
125
dae739eb
VG
126 /* time when queue got scheduled in to dispatch first request. */
127 unsigned long dispatch_start;
f75edf2d 128 unsigned int allocated_slice;
c4081ba5 129 unsigned int slice_dispatch;
dae739eb
VG
130 /* time when first request from queue completed and slice started. */
131 unsigned long slice_start;
6118b70b
JA
132 unsigned long slice_end;
133 long slice_resid;
6118b70b 134
65299a3b
CH
135 /* pending priority requests */
136 int prio_pending;
6118b70b
JA
137 /* number of requests that are on the dispatch list or inside driver */
138 int dispatched;
139
140 /* io prio of this group */
141 unsigned short ioprio, org_ioprio;
4aede84b 142 unsigned short ioprio_class;
6118b70b 143
c4081ba5
RK
144 pid_t pid;
145
3dde36dd 146 u32 seek_history;
b2c18e1e
JM
147 sector_t last_request_pos;
148
aa6f6a3d 149 struct cfq_rb_root *service_tree;
df5fe3e8 150 struct cfq_queue *new_cfqq;
cdb16e8f 151 struct cfq_group *cfqg;
c4e7893e
VG
152 /* Number of sectors dispatched from queue in single dispatch round */
153 unsigned long nr_sectors;
6118b70b
JA
154};
155
c0324a02 156/*
718eee05 157 * First index in the service_trees.
c0324a02
CZ
158 * IDLE is handled separately, so it has negative index
159 */
160enum wl_prio_t {
c0324a02 161 BE_WORKLOAD = 0,
615f0259
VG
162 RT_WORKLOAD = 1,
163 IDLE_WORKLOAD = 2,
b4627321 164 CFQ_PRIO_NR,
c0324a02
CZ
165};
166
718eee05
CZ
167/*
168 * Second index in the service_trees.
169 */
170enum wl_type_t {
171 ASYNC_WORKLOAD = 0,
172 SYNC_NOIDLE_WORKLOAD = 1,
173 SYNC_WORKLOAD = 2
174};
175
cdb16e8f
VG
176/* This is per cgroup per device grouping structure */
177struct cfq_group {
1fa8f6d6
VG
178 /* group service_tree member */
179 struct rb_node rb_node;
180
181 /* group service_tree key */
182 u64 vdisktime;
25bc6b07 183 unsigned int weight;
8184f93e
JT
184 unsigned int new_weight;
185 bool needs_update;
1fa8f6d6
VG
186
187 /* number of cfqq currently on this group */
188 int nr_cfqq;
189
cdb16e8f 190 /*
4495a7d4 191 * Per group busy queues average. Useful for workload slice calc. We
b4627321
VG
192 * create the array for each prio class but at run time it is used
193 * only for RT and BE class and slot for IDLE class remains unused.
194 * This is primarily done to avoid confusion and a gcc warning.
195 */
196 unsigned int busy_queues_avg[CFQ_PRIO_NR];
197 /*
198 * rr lists of queues with requests. We maintain service trees for
199 * RT and BE classes. These trees are subdivided in subclasses
200 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
201 * class there is no subclassification and all the cfq queues go on
202 * a single tree service_tree_idle.
cdb16e8f
VG
203 * Counts are embedded in the cfq_rb_root
204 */
205 struct cfq_rb_root service_trees[2][3];
206 struct cfq_rb_root service_tree_idle;
dae739eb
VG
207
208 unsigned long saved_workload_slice;
209 enum wl_type_t saved_workload;
210 enum wl_prio_t saved_serving_prio;
25fb5169
VG
211#ifdef CONFIG_CFQ_GROUP_IOSCHED
212 struct hlist_node cfqd_node;
213#endif
80bdf0c7
VG
214 /* number of requests that are on the dispatch list or inside driver */
215 int dispatched;
7700fc4f 216 struct cfq_ttime ttime;
cdb16e8f 217};
718eee05 218
c5869807
TH
219struct cfq_io_cq {
220 struct io_cq icq; /* must be the first member */
221 struct cfq_queue *cfqq[2];
222 struct cfq_ttime ttime;
223};
224
22e2c507
JA
225/*
226 * Per block device queue structure
227 */
1da177e4 228struct cfq_data {
165125e1 229 struct request_queue *queue;
1fa8f6d6
VG
230 /* Root service tree for cfq_groups */
231 struct cfq_rb_root grp_service_tree;
f51b802c 232 struct cfq_group *root_group;
22e2c507 233
c0324a02
CZ
234 /*
235 * The priority currently being served
22e2c507 236 */
c0324a02 237 enum wl_prio_t serving_prio;
718eee05
CZ
238 enum wl_type_t serving_type;
239 unsigned long workload_expires;
cdb16e8f 240 struct cfq_group *serving_group;
a36e71f9
JA
241
242 /*
243 * Each priority tree is sorted by next_request position. These
244 * trees are used when determining if two or more queues are
245 * interleaving requests (see cfq_close_cooperator).
246 */
247 struct rb_root prio_trees[CFQ_PRIO_LISTS];
248
22e2c507 249 unsigned int busy_queues;
ef8a41df 250 unsigned int busy_sync_queues;
22e2c507 251
53c583d2
CZ
252 int rq_in_driver;
253 int rq_in_flight[2];
45333d5a
AC
254
255 /*
256 * queue-depth detection
257 */
258 int rq_queued;
25776e35 259 int hw_tag;
e459dd08
CZ
260 /*
261 * hw_tag can be
 262 * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
263 * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
264 * 0 => no NCQ
265 */
266 int hw_tag_est_depth;
267 unsigned int hw_tag_samples;
1da177e4 268
22e2c507
JA
269 /*
270 * idle window management
271 */
272 struct timer_list idle_slice_timer;
23e018a1 273 struct work_struct unplug_work;
1da177e4 274
22e2c507 275 struct cfq_queue *active_queue;
c5869807 276 struct cfq_io_cq *active_cic;
22e2c507 277
c2dea2d1
VT
278 /*
279 * async queue for each priority case
280 */
281 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
282 struct cfq_queue *async_idle_cfqq;
15c31be4 283
6d048f53 284 sector_t last_position;
1da177e4 285
1da177e4
LT
286 /*
287 * tunables, see top of file
288 */
289 unsigned int cfq_quantum;
22e2c507 290 unsigned int cfq_fifo_expire[2];
1da177e4
LT
291 unsigned int cfq_back_penalty;
292 unsigned int cfq_back_max;
22e2c507
JA
293 unsigned int cfq_slice[2];
294 unsigned int cfq_slice_async_rq;
295 unsigned int cfq_slice_idle;
80bdf0c7 296 unsigned int cfq_group_idle;
963b72fc 297 unsigned int cfq_latency;
d9ff4187 298
6118b70b
JA
299 /*
300 * Fallback dummy cfqq for extreme OOM conditions
301 */
302 struct cfq_queue oom_cfqq;
365722bb 303
573412b2 304 unsigned long last_delayed_sync;
25fb5169
VG
305
306 /* List of cfq groups being managed on this device*/
307 struct hlist_head cfqg_list;
56edf7d7
VG
308
309 /* Number of groups which are on blkcg->blkg_list */
310 unsigned int nr_blkcg_linked_grps;
1da177e4
LT
311};
312
0381411e
TH
313static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
314{
315 return blkg_to_pdata(blkg, &blkio_policy_cfq);
316}
317
318static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg)
319{
320 return pdata_to_blkg(cfqg, &blkio_policy_cfq);
321}
322
25fb5169
VG
323static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
324
cdb16e8f
VG
325static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
326 enum wl_prio_t prio,
65b32a57 327 enum wl_type_t type)
c0324a02 328{
1fa8f6d6
VG
329 if (!cfqg)
330 return NULL;
331
c0324a02 332 if (prio == IDLE_WORKLOAD)
cdb16e8f 333 return &cfqg->service_tree_idle;
c0324a02 334
cdb16e8f 335 return &cfqg->service_trees[prio][type];
c0324a02
CZ
336}
337
3b18152c 338enum cfqq_state_flags {
b0b8d749
JA
339 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
340 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
b029195d 341 CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
b0b8d749 342 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
b0b8d749
JA
343 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
344 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
345 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
44f7c160 346 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
91fac317 347 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
b3b6d040 348 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
ae54abed 349 CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be split */
76280aff 350 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
f75edf2d 351 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
3b18152c
JA
352};
353
354#define CFQ_CFQQ_FNS(name) \
355static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
356{ \
fe094d98 357 (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
3b18152c
JA
358} \
359static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
360{ \
fe094d98 361 (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
3b18152c
JA
362} \
363static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
364{ \
fe094d98 365 return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
3b18152c
JA
366}
367
368CFQ_CFQQ_FNS(on_rr);
369CFQ_CFQQ_FNS(wait_request);
b029195d 370CFQ_CFQQ_FNS(must_dispatch);
3b18152c 371CFQ_CFQQ_FNS(must_alloc_slice);
3b18152c
JA
372CFQ_CFQQ_FNS(fifo_expire);
373CFQ_CFQQ_FNS(idle_window);
374CFQ_CFQQ_FNS(prio_changed);
44f7c160 375CFQ_CFQQ_FNS(slice_new);
91fac317 376CFQ_CFQQ_FNS(sync);
a36e71f9 377CFQ_CFQQ_FNS(coop);
ae54abed 378CFQ_CFQQ_FNS(split_coop);
76280aff 379CFQ_CFQQ_FNS(deep);
f75edf2d 380CFQ_CFQQ_FNS(wait_busy);
3b18152c
JA
381#undef CFQ_CFQQ_FNS
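/*
 * For reference, CFQ_CFQQ_FNS(on_rr) above expands to the following three
 * helpers (every other flag gets the same trio):
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */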
382
afc24d49 383#ifdef CONFIG_CFQ_GROUP_IOSCHED
2868ef7b
VG
384#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
385 blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
386 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
0381411e 387 blkg_path(cfqg_to_blkg((cfqq)->cfqg)), ##args)
2868ef7b
VG
388
389#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
390 blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
0381411e 391 blkg_path(cfqg_to_blkg((cfqg))), ##args) \
2868ef7b
VG
392
393#else
7b679138
JA
394#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
395 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
4495a7d4 396#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
2868ef7b 397#endif
7b679138
JA
398#define cfq_log(cfqd, fmt, args...) \
399 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
400
615f0259
VG
401/* Traverses through cfq group service trees */
402#define for_each_cfqg_st(cfqg, i, j, st) \
403 for (i = 0; i <= IDLE_WORKLOAD; i++) \
404 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
405 : &cfqg->service_tree_idle; \
406 (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
407 (i == IDLE_WORKLOAD && j == 0); \
408 j++, st = i < IDLE_WORKLOAD ? \
409 &cfqg->service_trees[i][j]: NULL) \
410
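/*
 * Iteration order of for_each_cfqg_st(): for i == BE_WORKLOAD and
 * i == RT_WORKLOAD it walks service_trees[i][j] with j from ASYNC_WORKLOAD
 * to SYNC_WORKLOAD, and for i == IDLE_WORKLOAD it visits only
 * service_tree_idle, i.e. all seven per-group service trees exactly once.
 */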
f5f2b6ce
SL
411static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
412 struct cfq_ttime *ttime, bool group_idle)
413{
414 unsigned long slice;
415 if (!sample_valid(ttime->ttime_samples))
416 return false;
417 if (group_idle)
418 slice = cfqd->cfq_group_idle;
419 else
420 slice = cfqd->cfq_slice_idle;
421 return ttime->ttime_mean > slice;
422}
615f0259 423
02b35081
VG
424static inline bool iops_mode(struct cfq_data *cfqd)
425{
426 /*
 427 * If we are not idling on queues and the drive does NCQ, requests
 428 * execute in parallel and measuring time per queue is not meaningful
 429 * unless we drive shallow queue depths, which itself becomes a
 430 * performance bottleneck. In such cases, switch to providing fairness
 431 * in terms of number of IOs.
432 */
433 if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
434 return true;
435 else
436 return false;
437}
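/*
 * When iops_mode() is true, cfq_group_served() below charges the group with
 * cfqq->slice_dispatch (the number of requests dispatched in the slice)
 * instead of elapsed time, so fairness is enforced per IO rather than per
 * time slice.
 */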
438
c0324a02
CZ
439static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
440{
441 if (cfq_class_idle(cfqq))
442 return IDLE_WORKLOAD;
443 if (cfq_class_rt(cfqq))
444 return RT_WORKLOAD;
445 return BE_WORKLOAD;
446}
447
718eee05
CZ
448
449static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
450{
451 if (!cfq_cfqq_sync(cfqq))
452 return ASYNC_WORKLOAD;
453 if (!cfq_cfqq_idle_window(cfqq))
454 return SYNC_NOIDLE_WORKLOAD;
455 return SYNC_WORKLOAD;
456}
457
58ff82f3
VG
458static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
459 struct cfq_data *cfqd,
460 struct cfq_group *cfqg)
c0324a02
CZ
461{
462 if (wl == IDLE_WORKLOAD)
cdb16e8f 463 return cfqg->service_tree_idle.count;
c0324a02 464
cdb16e8f
VG
465 return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
466 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
467 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
c0324a02
CZ
468}
469
f26bd1f0
VG
470static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
471 struct cfq_group *cfqg)
472{
473 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
474 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
475}
476
165125e1 477static void cfq_dispatch_insert(struct request_queue *, struct request *);
a6151c3a 478static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
fd0928df 479 struct io_context *, gfp_t);
91fac317 480
c5869807
TH
481static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
482{
483 /* cic->icq is the first member, %NULL will convert to %NULL */
484 return container_of(icq, struct cfq_io_cq, icq);
485}
486
47fdd4ca
TH
487static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
488 struct io_context *ioc)
489{
490 if (ioc)
491 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
492 return NULL;
493}
494
c5869807 495static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
91fac317 496{
a6151c3a 497 return cic->cfqq[is_sync];
91fac317
VT
498}
499
c5869807
TH
500static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
501 bool is_sync)
91fac317 502{
a6151c3a 503 cic->cfqq[is_sync] = cfqq;
91fac317
VT
504}
505
c5869807 506static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
bca4b914 507{
c5869807 508 return cic->icq.q->elevator->elevator_data;
bca4b914
KK
509}
510
91fac317
VT
511/*
512 * We regard a request as SYNC, if it's either a read or has the SYNC bit
513 * set (in which case it could also be direct WRITE).
514 */
a6151c3a 515static inline bool cfq_bio_sync(struct bio *bio)
91fac317 516{
7b6d91da 517 return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
91fac317 518}
1da177e4 519
99f95e52
AM
520/*
521 * scheduler run of queue, if there are requests pending and no one in the
522 * driver that will restart queueing
523 */
23e018a1 524static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
99f95e52 525{
7b679138
JA
526 if (cfqd->busy_queues) {
527 cfq_log(cfqd, "schedule dispatch");
23e018a1 528 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
7b679138 529 }
99f95e52
AM
530}
531
44f7c160
JA
532/*
533 * Scale schedule slice based on io priority. Use the sync time slice only
534 * if a queue is marked sync and has sync io queued. A sync queue with async
535 * io only, should not get full sync slice length.
536 */
a6151c3a 537static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
d9e7620e 538 unsigned short prio)
44f7c160 539{
d9e7620e 540 const int base_slice = cfqd->cfq_slice[sync];
44f7c160 541
d9e7620e
JA
542 WARN_ON(prio >= IOPRIO_BE_NR);
543
544 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
545}
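/*
 * Worked example, assuming HZ == 1000: the sync base slice is
 * cfq_slice_sync = HZ / 10 = 100 jiffies and CFQ_SLICE_SCALE == 5, so each
 * priority level is worth 100 / 5 = 20 jiffies:
 *	prio 0 (highest) -> 100 + 20 *  4 = 180
 *	prio 4 (default) -> 100 + 20 *  0 = 100
 *	prio 7 (lowest)  -> 100 + 20 * -3 =  40
 */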
44f7c160 546
d9e7620e
JA
547static inline int
548cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
549{
550 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
44f7c160
JA
551}
552
25bc6b07
VG
553static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
554{
555 u64 d = delta << CFQ_SERVICE_SHIFT;
556
557 d = d * BLKIO_WEIGHT_DEFAULT;
558 do_div(d, cfqg->weight);
559 return d;
560}
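/*
 * cfq_scale_slice() converts a service charge into vdisktime in inverse
 * proportion to the group weight: a group configured at twice
 * BLKIO_WEIGHT_DEFAULT accrues vdisktime at half the rate of a
 * default-weight group for the same delta, so the vdisktime-ordered group
 * service tree ends up giving it roughly twice the service.
 */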
561
562static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
563{
564 s64 delta = (s64)(vdisktime - min_vdisktime);
565 if (delta > 0)
566 min_vdisktime = vdisktime;
567
568 return min_vdisktime;
569}
570
571static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
572{
573 s64 delta = (s64)(vdisktime - min_vdisktime);
574 if (delta < 0)
575 min_vdisktime = vdisktime;
576
577 return min_vdisktime;
578}
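/*
 * The signed-delta comparison above keeps the min/max helpers correct even
 * if the u64 vdisktime counters wrap, as long as the two values are within
 * 2^63 of each other - the same trick time_before()/time_after() use for
 * jiffies.
 */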
579
580static void update_min_vdisktime(struct cfq_rb_root *st)
581{
25bc6b07
VG
582 struct cfq_group *cfqg;
583
25bc6b07
VG
584 if (st->left) {
585 cfqg = rb_entry_cfqg(st->left);
a6032710
GJ
586 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
587 cfqg->vdisktime);
25bc6b07 588 }
25bc6b07
VG
589}
590
5db5d642
CZ
591/*
592 * get averaged number of queues of RT/BE priority.
593 * average is updated, with a formula that gives more weight to higher numbers,
 594 * to quickly follow sudden increases and decrease slowly
595 */
596
58ff82f3
VG
597static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
598 struct cfq_group *cfqg, bool rt)
5869619c 599{
5db5d642
CZ
600 unsigned min_q, max_q;
601 unsigned mult = cfq_hist_divisor - 1;
602 unsigned round = cfq_hist_divisor / 2;
58ff82f3 603 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
5db5d642 604
58ff82f3
VG
605 min_q = min(cfqg->busy_queues_avg[rt], busy);
606 max_q = max(cfqg->busy_queues_avg[rt], busy);
607 cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
5db5d642 608 cfq_hist_divisor;
58ff82f3
VG
609 return cfqg->busy_queues_avg[rt];
610}
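/*
 * Worked example with cfq_hist_divisor == 4 (mult == 3, round == 2): if the
 * stored average is 2 and busy jumps to 10, the new value is
 * (3*10 + 2 + 2) / 4 = 8, so increases are followed almost immediately;
 * if busy then drops back to 2, the next update only decays the average to
 * (3*8 + 2 + 2) / 4 = 7, so decreases are tracked slowly.
 */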
611
612static inline unsigned
613cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
614{
615 struct cfq_rb_root *st = &cfqd->grp_service_tree;
616
617 return cfq_target_latency * cfqg->weight / st->total_weight;
5db5d642
CZ
618}
619
c553f8e3 620static inline unsigned
ba5bd520 621cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
44f7c160 622{
5db5d642
CZ
623 unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
624 if (cfqd->cfq_latency) {
58ff82f3
VG
625 /*
626 * interested queues (we consider only the ones with the same
627 * priority class in the cfq group)
628 */
629 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
630 cfq_class_rt(cfqq));
5db5d642
CZ
631 unsigned sync_slice = cfqd->cfq_slice[1];
632 unsigned expect_latency = sync_slice * iq;
58ff82f3
VG
633 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
634
635 if (expect_latency > group_slice) {
5db5d642
CZ
636 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
637 /* scale low_slice according to IO priority
638 * and sync vs async */
639 unsigned low_slice =
640 min(slice, base_low_slice * slice / sync_slice);
641 /* the adapted slice value is scaled to fit all iqs
642 * into the target latency */
58ff82f3 643 slice = max(slice * group_slice / expect_latency,
5db5d642
CZ
644 low_slice);
645 }
646 }
c553f8e3
SL
647 return slice;
648}
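/*
 * Worked example, assuming HZ == 1000 and a single group at default weight
 * (so group_slice == cfq_target_latency == 300 jiffies): a prio-4 sync
 * queue starts with slice == 100, and with iq == 5 busy sync queues
 * expect_latency == 5 * 100 == 500 > 300, so the slice is shrunk to
 * max(100 * 300 / 500, low_slice) == 60 jiffies, keeping all five queues
 * within the 300ms target latency.
 */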
649
650static inline void
651cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
652{
ba5bd520 653 unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
c553f8e3 654
dae739eb 655 cfqq->slice_start = jiffies;
5db5d642 656 cfqq->slice_end = jiffies + slice;
f75edf2d 657 cfqq->allocated_slice = slice;
7b679138 658 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
44f7c160
JA
659}
660
661/*
662 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
663 * isn't valid until the first request from the dispatch is activated
664 * and the slice time set.
665 */
a6151c3a 666static inline bool cfq_slice_used(struct cfq_queue *cfqq)
44f7c160
JA
667{
668 if (cfq_cfqq_slice_new(cfqq))
c1e44756 669 return false;
44f7c160 670 if (time_before(jiffies, cfqq->slice_end))
c1e44756 671 return false;
44f7c160 672
c1e44756 673 return true;
44f7c160
JA
674}
675
1da177e4 676/*
5e705374 677 * Lifted from AS - choose which of rq1 and rq2 is best served now.
1da177e4 678 * We choose the request that is closest to the head right now. Distance
e8a99053 679 * behind the head is penalized and only allowed to a certain extent.
1da177e4 680 */
5e705374 681static struct request *
cf7c25cf 682cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1da177e4 683{
cf7c25cf 684 sector_t s1, s2, d1 = 0, d2 = 0;
1da177e4 685 unsigned long back_max;
e8a99053
AM
686#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
687#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
688 unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1da177e4 689
5e705374
JA
690 if (rq1 == NULL || rq1 == rq2)
691 return rq2;
692 if (rq2 == NULL)
693 return rq1;
9c2c38a1 694
229836bd
NK
695 if (rq_is_sync(rq1) != rq_is_sync(rq2))
696 return rq_is_sync(rq1) ? rq1 : rq2;
697
65299a3b
CH
698 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
699 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
b53d1ed7 700
83096ebf
TH
701 s1 = blk_rq_pos(rq1);
702 s2 = blk_rq_pos(rq2);
1da177e4 703
1da177e4
LT
704 /*
705 * by definition, 1KiB is 2 sectors
706 */
707 back_max = cfqd->cfq_back_max * 2;
708
709 /*
710 * Strict one way elevator _except_ in the case where we allow
711 * short backward seeks which are biased as twice the cost of a
712 * similar forward seek.
713 */
714 if (s1 >= last)
715 d1 = s1 - last;
716 else if (s1 + back_max >= last)
717 d1 = (last - s1) * cfqd->cfq_back_penalty;
718 else
e8a99053 719 wrap |= CFQ_RQ1_WRAP;
1da177e4
LT
720
721 if (s2 >= last)
722 d2 = s2 - last;
723 else if (s2 + back_max >= last)
724 d2 = (last - s2) * cfqd->cfq_back_penalty;
725 else
e8a99053 726 wrap |= CFQ_RQ2_WRAP;
1da177e4
LT
727
728 /* Found required data */
e8a99053
AM
729
730 /*
731 * By doing switch() on the bit mask "wrap" we avoid having to
732 * check two variables for all permutations: --> faster!
733 */
734 switch (wrap) {
5e705374 735 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
e8a99053 736 if (d1 < d2)
5e705374 737 return rq1;
e8a99053 738 else if (d2 < d1)
5e705374 739 return rq2;
e8a99053
AM
740 else {
741 if (s1 >= s2)
5e705374 742 return rq1;
e8a99053 743 else
5e705374 744 return rq2;
e8a99053 745 }
1da177e4 746
e8a99053 747 case CFQ_RQ2_WRAP:
5e705374 748 return rq1;
e8a99053 749 case CFQ_RQ1_WRAP:
5e705374
JA
750 return rq2;
751 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
e8a99053
AM
752 default:
753 /*
754 * Since both rqs are wrapped,
755 * start with the one that's further behind head
756 * (--> only *one* back seek required),
757 * since back seek takes more time than forward.
758 */
759 if (s1 <= s2)
5e705374 760 return rq1;
1da177e4 761 else
5e705374 762 return rq2;
1da177e4
LT
763 }
764}
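/*
 * Example of the back seek penalty above: with the default
 * cfq_back_max == 16384 KiB, back_max is 32768 sectors.  A request sitting
 * 100 sectors behind the head gets an effective distance of
 * 100 * cfq_back_penalty == 200, so a forward request within 200 sectors of
 * the head still wins, while anything further than back_max behind the head
 * is treated as wrapped and only chosen as a last resort.
 */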
765
498d3aa2
JA
766/*
767 * The below is leftmost cache rbtree addon
768 */
0871714e 769static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
cc09e299 770{
615f0259
VG
771 /* Service tree is empty */
772 if (!root->count)
773 return NULL;
774
cc09e299
JA
775 if (!root->left)
776 root->left = rb_first(&root->rb);
777
0871714e
JA
778 if (root->left)
779 return rb_entry(root->left, struct cfq_queue, rb_node);
780
781 return NULL;
cc09e299
JA
782}
783
1fa8f6d6
VG
784static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
785{
786 if (!root->left)
787 root->left = rb_first(&root->rb);
788
789 if (root->left)
790 return rb_entry_cfqg(root->left);
791
792 return NULL;
793}
794
a36e71f9
JA
795static void rb_erase_init(struct rb_node *n, struct rb_root *root)
796{
797 rb_erase(n, root);
798 RB_CLEAR_NODE(n);
799}
800
cc09e299
JA
801static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
802{
803 if (root->left == n)
804 root->left = NULL;
a36e71f9 805 rb_erase_init(n, &root->rb);
aa6f6a3d 806 --root->count;
cc09e299
JA
807}
808
1da177e4
LT
809/*
810 * would be nice to take fifo expire time into account as well
811 */
5e705374
JA
812static struct request *
813cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
814 struct request *last)
1da177e4 815{
21183b07
JA
816 struct rb_node *rbnext = rb_next(&last->rb_node);
817 struct rb_node *rbprev = rb_prev(&last->rb_node);
5e705374 818 struct request *next = NULL, *prev = NULL;
1da177e4 819
21183b07 820 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1da177e4
LT
821
822 if (rbprev)
5e705374 823 prev = rb_entry_rq(rbprev);
1da177e4 824
21183b07 825 if (rbnext)
5e705374 826 next = rb_entry_rq(rbnext);
21183b07
JA
827 else {
828 rbnext = rb_first(&cfqq->sort_list);
829 if (rbnext && rbnext != &last->rb_node)
5e705374 830 next = rb_entry_rq(rbnext);
21183b07 831 }
1da177e4 832
cf7c25cf 833 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1da177e4
LT
834}
835
d9e7620e
JA
836static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
837 struct cfq_queue *cfqq)
1da177e4 838{
d9e7620e
JA
839 /*
840 * just an approximation, should be ok.
841 */
cdb16e8f 842 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
464191c6 843 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
d9e7620e
JA
844}
845
1fa8f6d6
VG
846static inline s64
847cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
848{
849 return cfqg->vdisktime - st->min_vdisktime;
850}
851
852static void
853__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
854{
855 struct rb_node **node = &st->rb.rb_node;
856 struct rb_node *parent = NULL;
857 struct cfq_group *__cfqg;
858 s64 key = cfqg_key(st, cfqg);
859 int left = 1;
860
861 while (*node != NULL) {
862 parent = *node;
863 __cfqg = rb_entry_cfqg(parent);
864
865 if (key < cfqg_key(st, __cfqg))
866 node = &parent->rb_left;
867 else {
868 node = &parent->rb_right;
869 left = 0;
870 }
871 }
872
873 if (left)
874 st->left = &cfqg->rb_node;
875
876 rb_link_node(&cfqg->rb_node, parent, node);
877 rb_insert_color(&cfqg->rb_node, &st->rb);
878}
879
880static void
8184f93e
JT
881cfq_update_group_weight(struct cfq_group *cfqg)
882{
883 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
884 if (cfqg->needs_update) {
885 cfqg->weight = cfqg->new_weight;
886 cfqg->needs_update = false;
887 }
888}
889
890static void
891cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
892{
893 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
894
895 cfq_update_group_weight(cfqg);
896 __cfq_group_service_tree_add(st, cfqg);
897 st->total_weight += cfqg->weight;
898}
899
900static void
901cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1fa8f6d6
VG
902{
903 struct cfq_rb_root *st = &cfqd->grp_service_tree;
904 struct cfq_group *__cfqg;
905 struct rb_node *n;
906
907 cfqg->nr_cfqq++;
760701bf 908 if (!RB_EMPTY_NODE(&cfqg->rb_node))
1fa8f6d6
VG
909 return;
910
911 /*
912 * Currently put the group at the end. Later implement something
 913 * so that groups get a smaller vtime based on their weights, so that
25985edc 914 * a group does not lose everything if it was not continuously backlogged.
1fa8f6d6
VG
915 */
916 n = rb_last(&st->rb);
917 if (n) {
918 __cfqg = rb_entry_cfqg(n);
919 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
920 } else
921 cfqg->vdisktime = st->min_vdisktime;
8184f93e
JT
922 cfq_group_service_tree_add(st, cfqg);
923}
1fa8f6d6 924
8184f93e
JT
925static void
926cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
927{
928 st->total_weight -= cfqg->weight;
929 if (!RB_EMPTY_NODE(&cfqg->rb_node))
930 cfq_rb_erase(&cfqg->rb_node, st);
1fa8f6d6
VG
931}
932
933static void
8184f93e 934cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1fa8f6d6
VG
935{
936 struct cfq_rb_root *st = &cfqd->grp_service_tree;
937
938 BUG_ON(cfqg->nr_cfqq < 1);
939 cfqg->nr_cfqq--;
25bc6b07 940
1fa8f6d6
VG
941 /* If there are other cfq queues under this group, don't delete it */
942 if (cfqg->nr_cfqq)
943 return;
944
2868ef7b 945 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
8184f93e 946 cfq_group_service_tree_del(st, cfqg);
dae739eb 947 cfqg->saved_workload_slice = 0;
c1768268
TH
948 cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg),
949 &blkio_policy_cfq, 1);
dae739eb
VG
950}
951
167400d3
JT
952static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
953 unsigned int *unaccounted_time)
dae739eb 954{
f75edf2d 955 unsigned int slice_used;
dae739eb
VG
956
957 /*
958 * Queue got expired before even a single request completed or
959 * got expired immediately after first request completion.
960 */
961 if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
962 /*
963 * Also charge the seek time incurred to the group, otherwise
 964 * if there are multiple queues in the group, each can dispatch
 965 * a single request on seeky media and cause lots of seek time
 966 * and the group will never know it.
967 */
968 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
969 1);
970 } else {
971 slice_used = jiffies - cfqq->slice_start;
167400d3
JT
972 if (slice_used > cfqq->allocated_slice) {
973 *unaccounted_time = slice_used - cfqq->allocated_slice;
f75edf2d 974 slice_used = cfqq->allocated_slice;
167400d3
JT
975 }
976 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
977 *unaccounted_time += cfqq->slice_start -
978 cfqq->dispatch_start;
dae739eb
VG
979 }
980
dae739eb
VG
981 return slice_used;
982}
983
984static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
e5ff082e 985 struct cfq_queue *cfqq)
dae739eb
VG
986{
987 struct cfq_rb_root *st = &cfqd->grp_service_tree;
167400d3 988 unsigned int used_sl, charge, unaccounted_sl = 0;
f26bd1f0
VG
989 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
990 - cfqg->service_tree_idle.count;
991
992 BUG_ON(nr_sync < 0);
167400d3 993 used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
dae739eb 994
02b35081
VG
995 if (iops_mode(cfqd))
996 charge = cfqq->slice_dispatch;
997 else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
998 charge = cfqq->allocated_slice;
dae739eb
VG
999
1000 /* Can't update vdisktime while group is on service tree */
8184f93e 1001 cfq_group_service_tree_del(st, cfqg);
02b35081 1002 cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
8184f93e
JT
1003 /* If a new weight was requested, update now, off tree */
1004 cfq_group_service_tree_add(st, cfqg);
dae739eb
VG
1005
1006 /* This group is being expired. Save the context */
1007 if (time_after(cfqd->workload_expires, jiffies)) {
1008 cfqg->saved_workload_slice = cfqd->workload_expires
1009 - jiffies;
1010 cfqg->saved_workload = cfqd->serving_type;
1011 cfqg->saved_serving_prio = cfqd->serving_prio;
1012 } else
1013 cfqg->saved_workload_slice = 0;
2868ef7b
VG
1014
1015 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1016 st->min_vdisktime);
fd16d263
JP
1017 cfq_log_cfqq(cfqq->cfqd, cfqq,
1018 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1019 used_sl, cfqq->slice_dispatch, charge,
1020 iops_mode(cfqd), cfqq->nr_sectors);
c1768268
TH
1021 cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), &blkio_policy_cfq,
1022 used_sl, unaccounted_sl);
1023 cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg), &blkio_policy_cfq);
1fa8f6d6
VG
1024}
1025
f51b802c
TH
1026/**
1027 * cfq_init_cfqg_base - initialize base part of a cfq_group
1028 * @cfqg: cfq_group to initialize
1029 *
1030 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1031 * is enabled or not.
1032 */
1033static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1034{
1035 struct cfq_rb_root *st;
1036 int i, j;
1037
1038 for_each_cfqg_st(cfqg, i, j, st)
1039 *st = CFQ_RB_ROOT;
1040 RB_CLEAR_NODE(&cfqg->rb_node);
1041
1042 cfqg->ttime.last_end_request = jiffies;
1043}
1044
25fb5169 1045#ifdef CONFIG_CFQ_GROUP_IOSCHED
ca32aefc
TH
1046static void cfq_update_blkio_group_weight(struct request_queue *q,
1047 struct blkio_group *blkg,
8aea4545 1048 unsigned int weight)
f8d461d6 1049{
0381411e
TH
1050 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
1051
8184f93e
JT
1052 cfqg->new_weight = weight;
1053 cfqg->needs_update = true;
f8d461d6
VG
1054}
1055
cd1604fa
TH
1056static void cfq_link_blkio_group(struct request_queue *q,
1057 struct blkio_group *blkg)
25fb5169 1058{
cd1604fa 1059 struct cfq_data *cfqd = q->elevator->elevator_data;
0381411e 1060 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
f469a7b4
VG
1061
1062 cfqd->nr_blkcg_linked_grps++;
f469a7b4
VG
1063
1064 /* Add group on cfqd list */
1065 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1066}
1067
0381411e 1068static void cfq_init_blkio_group(struct blkio_group *blkg)
f469a7b4 1069{
0381411e 1070 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
25fb5169 1071
f51b802c 1072 cfq_init_cfqg_base(cfqg);
0381411e 1073 cfqg->weight = blkg->blkcg->weight;
25fb5169
VG
1074}
1075
1076/*
3e59cf9d
VG
1077 * Search for the cfq group current task belongs to. request_queue lock must
1078 * be held.
25fb5169 1079 */
cd1604fa
TH
1080static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1081 struct blkio_cgroup *blkcg)
25fb5169 1082{
f469a7b4 1083 struct request_queue *q = cfqd->queue;
cd1604fa 1084 struct cfq_group *cfqg = NULL;
25fb5169 1085
cd1604fa
TH
1086 /* avoid lookup for the common case where there's no blkio cgroup */
1087 if (blkcg == &blkio_root_cgroup) {
1088 cfqg = cfqd->root_group;
1089 } else {
1090 struct blkio_group *blkg;
f469a7b4 1091
cd1604fa
TH
1092 blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
1093 if (!IS_ERR(blkg))
0381411e 1094 cfqg = blkg_to_cfqg(blkg);
cd1604fa 1095 }
f469a7b4 1096
25fb5169
VG
1097 return cfqg;
1098}
1099
1100static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1101{
1102 /* Currently, all async queues are mapped to root group */
1103 if (!cfq_cfqq_sync(cfqq))
f51b802c 1104 cfqg = cfqq->cfqd->root_group;
25fb5169
VG
1105
1106 cfqq->cfqg = cfqg;
b1c35769 1107 /* cfqq reference on cfqg */
1adaf3dd 1108 blkg_get(cfqg_to_blkg(cfqg));
b1c35769
VG
1109}
1110
1111static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1112{
1113 /* Something wrong if we are trying to remove same group twice */
1114 BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1115
1116 hlist_del_init(&cfqg->cfqd_node);
1117
a5395b83
VG
1118 BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
1119 cfqd->nr_blkcg_linked_grps--;
1120
b1c35769
VG
1121 /*
1122 * Put the reference taken at the time of creation so that when all
1123 * queues are gone, group can be destroyed.
1124 */
1adaf3dd 1125 blkg_put(cfqg_to_blkg(cfqg));
b1c35769
VG
1126}
1127
72e06c25 1128static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
b1c35769
VG
1129{
1130 struct hlist_node *pos, *n;
1131 struct cfq_group *cfqg;
72e06c25 1132 bool empty = true;
b1c35769
VG
1133
1134 hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1135 /*
1136 * If cgroup removal path got to blk_group first and removed
1137 * it from cgroup list, then it will take care of destroying
1138 * cfqg also.
1139 */
0381411e 1140 if (!cfq_blkiocg_del_blkio_group(cfqg_to_blkg(cfqg)))
b1c35769 1141 cfq_destroy_cfqg(cfqd, cfqg);
72e06c25
TH
1142 else
1143 empty = false;
b1c35769 1144 }
72e06c25 1145 return empty;
25fb5169 1146}
b1c35769
VG
1147
1148/*
1149 * Blk cgroup controller notification saying that blkio_group object is being
1150 * delinked as associated cgroup object is going away. That also means that
1151 * no new IO will come in this group. So get rid of this group as soon as
1152 * any pending IO in the group is finished.
1153 *
1154 * This function is called under rcu_read_lock(). key is the rcu protected
ca32aefc
TH
1155 * pointer. That means @q is a valid request_queue pointer as long as we
 1156 * are under the rcu read lock.
b1c35769 1157 *
ca32aefc 1158 * @q was fetched from blkio_group under blkio_cgroup->lock. That means
b1c35769
VG
 1159 * it should not be NULL as even if the elevator was exiting, cgroup deletion
1160 * path got to it first.
1161 */
ca32aefc
TH
1162static void cfq_unlink_blkio_group(struct request_queue *q,
1163 struct blkio_group *blkg)
b1c35769 1164{
ca32aefc
TH
1165 struct cfq_data *cfqd = q->elevator->elevator_data;
1166 unsigned long flags;
b1c35769 1167
ca32aefc 1168 spin_lock_irqsave(q->queue_lock, flags);
0381411e 1169 cfq_destroy_cfqg(cfqd, blkg_to_cfqg(blkg));
ca32aefc 1170 spin_unlock_irqrestore(q->queue_lock, flags);
b1c35769
VG
1171}
1172
72e06c25
TH
1173static struct elevator_type iosched_cfq;
1174
1175static bool cfq_clear_queue(struct request_queue *q)
1176{
1177 lockdep_assert_held(q->queue_lock);
1178
1179 /* shoot down blkgs iff the current elevator is cfq */
1180 if (!q->elevator || q->elevator->type != &iosched_cfq)
1181 return true;
1182
1183 return cfq_release_cfq_groups(q->elevator->elevator_data);
1184}
1185
25fb5169 1186#else /* GROUP_IOSCHED */
cd1604fa
TH
1187static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
1188 struct blkio_cgroup *blkcg)
25fb5169 1189{
f51b802c 1190 return cfqd->root_group;
25fb5169 1191}
7f1dc8a2 1192
25fb5169
VG
1193static inline void
1194cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1195 cfqq->cfqg = cfqg;
1196}
1197
b1c35769 1198static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
b1c35769 1199
25fb5169
VG
1200#endif /* GROUP_IOSCHED */
1201
498d3aa2 1202/*
c0324a02 1203 * The cfqd->service_trees holds all pending cfq_queue's that have
498d3aa2
JA
1204 * requests waiting to be processed. It is sorted in the order that
1205 * we will service the queues.
1206 */
a36e71f9 1207static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 1208 bool add_front)
d9e7620e 1209{
0871714e
JA
1210 struct rb_node **p, *parent;
1211 struct cfq_queue *__cfqq;
d9e7620e 1212 unsigned long rb_key;
c0324a02 1213 struct cfq_rb_root *service_tree;
498d3aa2 1214 int left;
dae739eb 1215 int new_cfqq = 1;
ae30c286 1216
cdb16e8f 1217 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
65b32a57 1218 cfqq_type(cfqq));
0871714e
JA
1219 if (cfq_class_idle(cfqq)) {
1220 rb_key = CFQ_IDLE_DELAY;
aa6f6a3d 1221 parent = rb_last(&service_tree->rb);
0871714e
JA
1222 if (parent && parent != &cfqq->rb_node) {
1223 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1224 rb_key += __cfqq->rb_key;
1225 } else
1226 rb_key += jiffies;
1227 } else if (!add_front) {
b9c8946b
JA
1228 /*
1229 * Get our rb key offset. Subtract any residual slice
1230 * value carried from last service. A negative resid
1231 * count indicates slice overrun, and this should position
1232 * the next service time further away in the tree.
1233 */
edd75ffd 1234 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
b9c8946b 1235 rb_key -= cfqq->slice_resid;
edd75ffd 1236 cfqq->slice_resid = 0;
48e025e6
CZ
1237 } else {
1238 rb_key = -HZ;
aa6f6a3d 1239 __cfqq = cfq_rb_first(service_tree);
48e025e6
CZ
1240 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1241 }
1da177e4 1242
d9e7620e 1243 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
dae739eb 1244 new_cfqq = 0;
99f9628a 1245 /*
d9e7620e 1246 * same position, nothing more to do
99f9628a 1247 */
c0324a02
CZ
1248 if (rb_key == cfqq->rb_key &&
1249 cfqq->service_tree == service_tree)
d9e7620e 1250 return;
1da177e4 1251
aa6f6a3d
CZ
1252 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1253 cfqq->service_tree = NULL;
1da177e4 1254 }
d9e7620e 1255
498d3aa2 1256 left = 1;
0871714e 1257 parent = NULL;
aa6f6a3d
CZ
1258 cfqq->service_tree = service_tree;
1259 p = &service_tree->rb.rb_node;
d9e7620e 1260 while (*p) {
67060e37 1261 struct rb_node **n;
cc09e299 1262
d9e7620e
JA
1263 parent = *p;
1264 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1265
0c534e0a 1266 /*
c0324a02 1267 * sort by key, that represents service time.
0c534e0a 1268 */
c0324a02 1269 if (time_before(rb_key, __cfqq->rb_key))
67060e37 1270 n = &(*p)->rb_left;
c0324a02 1271 else {
67060e37 1272 n = &(*p)->rb_right;
cc09e299 1273 left = 0;
c0324a02 1274 }
67060e37
JA
1275
1276 p = n;
d9e7620e
JA
1277 }
1278
cc09e299 1279 if (left)
aa6f6a3d 1280 service_tree->left = &cfqq->rb_node;
cc09e299 1281
d9e7620e
JA
1282 cfqq->rb_key = rb_key;
1283 rb_link_node(&cfqq->rb_node, parent, p);
aa6f6a3d
CZ
1284 rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1285 service_tree->count++;
20359f27 1286 if (add_front || !new_cfqq)
dae739eb 1287 return;
8184f93e 1288 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1da177e4
LT
1289}
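/*
 * Summary of the rb_key computed above: it is roughly jiffies plus a
 * per-group offset from cfq_slice_offset() (which grows with the number of
 * sibling queues), minus any unused slice_resid.  Smaller keys sort further
 * left and are picked first by cfq_rb_first(), while idle-class queues are
 * keyed CFQ_IDLE_DELAY past the current last entry so they sort behind
 * everything else.
 */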
1290
a36e71f9 1291static struct cfq_queue *
f2d1f0ae
JA
1292cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1293 sector_t sector, struct rb_node **ret_parent,
1294 struct rb_node ***rb_link)
a36e71f9 1295{
a36e71f9
JA
1296 struct rb_node **p, *parent;
1297 struct cfq_queue *cfqq = NULL;
1298
1299 parent = NULL;
1300 p = &root->rb_node;
1301 while (*p) {
1302 struct rb_node **n;
1303
1304 parent = *p;
1305 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1306
1307 /*
1308 * Sort strictly based on sector. Smallest to the left,
1309 * largest to the right.
1310 */
2e46e8b2 1311 if (sector > blk_rq_pos(cfqq->next_rq))
a36e71f9 1312 n = &(*p)->rb_right;
2e46e8b2 1313 else if (sector < blk_rq_pos(cfqq->next_rq))
a36e71f9
JA
1314 n = &(*p)->rb_left;
1315 else
1316 break;
1317 p = n;
3ac6c9f8 1318 cfqq = NULL;
a36e71f9
JA
1319 }
1320
1321 *ret_parent = parent;
1322 if (rb_link)
1323 *rb_link = p;
3ac6c9f8 1324 return cfqq;
a36e71f9
JA
1325}
1326
1327static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1328{
a36e71f9
JA
1329 struct rb_node **p, *parent;
1330 struct cfq_queue *__cfqq;
1331
f2d1f0ae
JA
1332 if (cfqq->p_root) {
1333 rb_erase(&cfqq->p_node, cfqq->p_root);
1334 cfqq->p_root = NULL;
1335 }
a36e71f9
JA
1336
1337 if (cfq_class_idle(cfqq))
1338 return;
1339 if (!cfqq->next_rq)
1340 return;
1341
f2d1f0ae 1342 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2e46e8b2
TH
1343 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1344 blk_rq_pos(cfqq->next_rq), &parent, &p);
3ac6c9f8
JA
1345 if (!__cfqq) {
1346 rb_link_node(&cfqq->p_node, parent, p);
f2d1f0ae
JA
1347 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1348 } else
1349 cfqq->p_root = NULL;
a36e71f9
JA
1350}
1351
498d3aa2
JA
1352/*
1353 * Update cfqq's position in the service tree.
1354 */
edd75ffd 1355static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
6d048f53 1356{
6d048f53
JA
1357 /*
1358 * Resorting requires the cfqq to be on the RR list already.
1359 */
a36e71f9 1360 if (cfq_cfqq_on_rr(cfqq)) {
edd75ffd 1361 cfq_service_tree_add(cfqd, cfqq, 0);
a36e71f9
JA
1362 cfq_prio_tree_add(cfqd, cfqq);
1363 }
6d048f53
JA
1364}
1365
1da177e4
LT
1366/*
1367 * add to busy list of queues for service, trying to be fair in ordering
22e2c507 1368 * the pending list according to last request service
1da177e4 1369 */
febffd61 1370static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 1371{
7b679138 1372 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
3b18152c
JA
1373 BUG_ON(cfq_cfqq_on_rr(cfqq));
1374 cfq_mark_cfqq_on_rr(cfqq);
1da177e4 1375 cfqd->busy_queues++;
ef8a41df
SL
1376 if (cfq_cfqq_sync(cfqq))
1377 cfqd->busy_sync_queues++;
1da177e4 1378
edd75ffd 1379 cfq_resort_rr_list(cfqd, cfqq);
1da177e4
LT
1380}
1381
498d3aa2
JA
1382/*
1383 * Called when the cfqq no longer has requests pending, remove it from
1384 * the service tree.
1385 */
febffd61 1386static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 1387{
7b679138 1388 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
3b18152c
JA
1389 BUG_ON(!cfq_cfqq_on_rr(cfqq));
1390 cfq_clear_cfqq_on_rr(cfqq);
1da177e4 1391
aa6f6a3d
CZ
1392 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1393 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1394 cfqq->service_tree = NULL;
1395 }
f2d1f0ae
JA
1396 if (cfqq->p_root) {
1397 rb_erase(&cfqq->p_node, cfqq->p_root);
1398 cfqq->p_root = NULL;
1399 }
d9e7620e 1400
8184f93e 1401 cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1da177e4
LT
1402 BUG_ON(!cfqd->busy_queues);
1403 cfqd->busy_queues--;
ef8a41df
SL
1404 if (cfq_cfqq_sync(cfqq))
1405 cfqd->busy_sync_queues--;
1da177e4
LT
1406}
1407
1408/*
1409 * rb tree support functions
1410 */
febffd61 1411static void cfq_del_rq_rb(struct request *rq)
1da177e4 1412{
5e705374 1413 struct cfq_queue *cfqq = RQ_CFQQ(rq);
5e705374 1414 const int sync = rq_is_sync(rq);
1da177e4 1415
b4878f24
JA
1416 BUG_ON(!cfqq->queued[sync]);
1417 cfqq->queued[sync]--;
1da177e4 1418
5e705374 1419 elv_rb_del(&cfqq->sort_list, rq);
1da177e4 1420
f04a6424
VG
1421 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1422 /*
1423 * Queue will be deleted from service tree when we actually
1424 * expire it later. Right now just remove it from prio tree
1425 * as it is empty.
1426 */
1427 if (cfqq->p_root) {
1428 rb_erase(&cfqq->p_node, cfqq->p_root);
1429 cfqq->p_root = NULL;
1430 }
1431 }
1da177e4
LT
1432}
1433
5e705374 1434static void cfq_add_rq_rb(struct request *rq)
1da177e4 1435{
5e705374 1436 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 1437 struct cfq_data *cfqd = cfqq->cfqd;
796d5116 1438 struct request *prev;
1da177e4 1439
5380a101 1440 cfqq->queued[rq_is_sync(rq)]++;
1da177e4 1441
796d5116 1442 elv_rb_add(&cfqq->sort_list, rq);
5fccbf61
JA
1443
1444 if (!cfq_cfqq_on_rr(cfqq))
1445 cfq_add_cfqq_rr(cfqd, cfqq);
5044eed4
JA
1446
1447 /*
1448 * check if this request is a better next-serve candidate
1449 */
a36e71f9 1450 prev = cfqq->next_rq;
cf7c25cf 1451 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
a36e71f9
JA
1452
1453 /*
1454 * adjust priority tree position, if ->next_rq changes
1455 */
1456 if (prev != cfqq->next_rq)
1457 cfq_prio_tree_add(cfqd, cfqq);
1458
5044eed4 1459 BUG_ON(!cfqq->next_rq);
1da177e4
LT
1460}
1461
febffd61 1462static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1da177e4 1463{
5380a101
JA
1464 elv_rb_del(&cfqq->sort_list, rq);
1465 cfqq->queued[rq_is_sync(rq)]--;
0381411e 1466 cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
c1768268
TH
1467 &blkio_policy_cfq, rq_data_dir(rq),
1468 rq_is_sync(rq));
5e705374 1469 cfq_add_rq_rb(rq);
0381411e 1470 cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
c1768268 1471 &blkio_policy_cfq,
0381411e
TH
1472 cfqg_to_blkg(cfqq->cfqd->serving_group),
1473 rq_data_dir(rq), rq_is_sync(rq));
1da177e4
LT
1474}
1475
206dc69b
JA
1476static struct request *
1477cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1da177e4 1478{
206dc69b 1479 struct task_struct *tsk = current;
c5869807 1480 struct cfq_io_cq *cic;
206dc69b 1481 struct cfq_queue *cfqq;
1da177e4 1482
4ac845a2 1483 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
1484 if (!cic)
1485 return NULL;
1486
1487 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
89850f7e
JA
1488 if (cfqq) {
1489 sector_t sector = bio->bi_sector + bio_sectors(bio);
1490
21183b07 1491 return elv_rb_find(&cfqq->sort_list, sector);
89850f7e 1492 }
1da177e4 1493
1da177e4
LT
1494 return NULL;
1495}
1496
165125e1 1497static void cfq_activate_request(struct request_queue *q, struct request *rq)
1da177e4 1498{
22e2c507 1499 struct cfq_data *cfqd = q->elevator->elevator_data;
3b18152c 1500
53c583d2 1501 cfqd->rq_in_driver++;
7b679138 1502 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
53c583d2 1503 cfqd->rq_in_driver);
25776e35 1504
5b93629b 1505 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1da177e4
LT
1506}
1507
165125e1 1508static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1da177e4 1509{
b4878f24
JA
1510 struct cfq_data *cfqd = q->elevator->elevator_data;
1511
53c583d2
CZ
1512 WARN_ON(!cfqd->rq_in_driver);
1513 cfqd->rq_in_driver--;
7b679138 1514 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
53c583d2 1515 cfqd->rq_in_driver);
1da177e4
LT
1516}
1517
b4878f24 1518static void cfq_remove_request(struct request *rq)
1da177e4 1519{
5e705374 1520 struct cfq_queue *cfqq = RQ_CFQQ(rq);
21183b07 1521
5e705374
JA
1522 if (cfqq->next_rq == rq)
1523 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1da177e4 1524
b4878f24 1525 list_del_init(&rq->queuelist);
5e705374 1526 cfq_del_rq_rb(rq);
374f84ac 1527
45333d5a 1528 cfqq->cfqd->rq_queued--;
0381411e 1529 cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
c1768268
TH
1530 &blkio_policy_cfq, rq_data_dir(rq),
1531 rq_is_sync(rq));
65299a3b
CH
1532 if (rq->cmd_flags & REQ_PRIO) {
1533 WARN_ON(!cfqq->prio_pending);
1534 cfqq->prio_pending--;
b53d1ed7 1535 }
1da177e4
LT
1536}
1537
165125e1
JA
1538static int cfq_merge(struct request_queue *q, struct request **req,
1539 struct bio *bio)
1da177e4
LT
1540{
1541 struct cfq_data *cfqd = q->elevator->elevator_data;
1542 struct request *__rq;
1da177e4 1543
206dc69b 1544 __rq = cfq_find_rq_fmerge(cfqd, bio);
22e2c507 1545 if (__rq && elv_rq_merge_ok(__rq, bio)) {
9817064b
JA
1546 *req = __rq;
1547 return ELEVATOR_FRONT_MERGE;
1da177e4
LT
1548 }
1549
1550 return ELEVATOR_NO_MERGE;
1da177e4
LT
1551}
1552
165125e1 1553static void cfq_merged_request(struct request_queue *q, struct request *req,
21183b07 1554 int type)
1da177e4 1555{
21183b07 1556 if (type == ELEVATOR_FRONT_MERGE) {
5e705374 1557 struct cfq_queue *cfqq = RQ_CFQQ(req);
1da177e4 1558
5e705374 1559 cfq_reposition_rq_rb(cfqq, req);
1da177e4 1560 }
1da177e4
LT
1561}
1562
812d4026
DS
1563static void cfq_bio_merged(struct request_queue *q, struct request *req,
1564 struct bio *bio)
1565{
0381411e 1566 cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
c1768268
TH
1567 &blkio_policy_cfq, bio_data_dir(bio),
1568 cfq_bio_sync(bio));
812d4026
DS
1569}
1570
1da177e4 1571static void
165125e1 1572cfq_merged_requests(struct request_queue *q, struct request *rq,
1da177e4
LT
1573 struct request *next)
1574{
cf7c25cf 1575 struct cfq_queue *cfqq = RQ_CFQQ(rq);
4a0b75c7
SL
1576 struct cfq_data *cfqd = q->elevator->elevator_data;
1577
22e2c507
JA
1578 /*
1579 * reposition in fifo if next is older than rq
1580 */
1581 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
30996f40 1582 time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
22e2c507 1583 list_move(&rq->queuelist, &next->queuelist);
30996f40
JA
1584 rq_set_fifo_time(rq, rq_fifo_time(next));
1585 }
22e2c507 1586
cf7c25cf
CZ
1587 if (cfqq->next_rq == next)
1588 cfqq->next_rq = rq;
b4878f24 1589 cfq_remove_request(next);
0381411e 1590 cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
c1768268
TH
1591 &blkio_policy_cfq, rq_data_dir(next),
1592 rq_is_sync(next));
4a0b75c7
SL
1593
1594 cfqq = RQ_CFQQ(next);
1595 /*
1596 * all requests of this queue are merged to other queues, delete it
1597 * from the service tree. If it's the active_queue,
1598 * cfq_dispatch_requests() will choose to expire it or do idle
1599 */
1600 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
1601 cfqq != cfqd->active_queue)
1602 cfq_del_cfqq_rr(cfqd, cfqq);
22e2c507
JA
1603}
1604
165125e1 1605static int cfq_allow_merge(struct request_queue *q, struct request *rq,
da775265
JA
1606 struct bio *bio)
1607{
1608 struct cfq_data *cfqd = q->elevator->elevator_data;
c5869807 1609 struct cfq_io_cq *cic;
da775265 1610 struct cfq_queue *cfqq;
da775265
JA
1611
1612 /*
ec8acb69 1613 * Disallow merge of a sync bio into an async request.
da775265 1614 */
91fac317 1615 if (cfq_bio_sync(bio) && !rq_is_sync(rq))
a6151c3a 1616 return false;
da775265
JA
1617
1618 /*
f1a4f4d3 1619 * Lookup the cfqq that this bio will be queued with and allow
07c2bd37 1620 * merge only if rq is queued there.
f1a4f4d3 1621 */
07c2bd37
TH
1622 cic = cfq_cic_lookup(cfqd, current->io_context);
1623 if (!cic)
1624 return false;
719d3402 1625
91fac317 1626 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
a6151c3a 1627 return cfqq == RQ_CFQQ(rq);
da775265
JA
1628}
1629
812df48d
DS
1630static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1631{
1632 del_timer(&cfqd->idle_slice_timer);
c1768268
TH
1633 cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
1634 &blkio_policy_cfq);
812df48d
DS
1635}
1636
febffd61
JA
1637static void __cfq_set_active_queue(struct cfq_data *cfqd,
1638 struct cfq_queue *cfqq)
22e2c507
JA
1639{
1640 if (cfqq) {
b1ffe737
DS
1641 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1642 cfqd->serving_prio, cfqd->serving_type);
c1768268
TH
1643 cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg),
1644 &blkio_policy_cfq);
62a37f6b
JT
1645 cfqq->slice_start = 0;
1646 cfqq->dispatch_start = jiffies;
1647 cfqq->allocated_slice = 0;
1648 cfqq->slice_end = 0;
1649 cfqq->slice_dispatch = 0;
1650 cfqq->nr_sectors = 0;
1651
1652 cfq_clear_cfqq_wait_request(cfqq);
1653 cfq_clear_cfqq_must_dispatch(cfqq);
1654 cfq_clear_cfqq_must_alloc_slice(cfqq);
1655 cfq_clear_cfqq_fifo_expire(cfqq);
1656 cfq_mark_cfqq_slice_new(cfqq);
1657
1658 cfq_del_timer(cfqd, cfqq);
22e2c507
JA
1659 }
1660
1661 cfqd->active_queue = cfqq;
1662}
1663
7b14e3b5
JA
1664/*
1665 * current cfqq expired its slice (or was too idle), select new one
1666 */
1667static void
1668__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e5ff082e 1669 bool timed_out)
7b14e3b5 1670{
7b679138
JA
1671 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1672
7b14e3b5 1673 if (cfq_cfqq_wait_request(cfqq))
812df48d 1674 cfq_del_timer(cfqd, cfqq);
7b14e3b5 1675
7b14e3b5 1676 cfq_clear_cfqq_wait_request(cfqq);
f75edf2d 1677 cfq_clear_cfqq_wait_busy(cfqq);
7b14e3b5 1678
ae54abed
SL
1679 /*
1680 * If this cfqq is shared between multiple processes, check to
1681 * make sure that those processes are still issuing I/Os within
1682 * the mean seek distance. If not, it may be time to break the
1683 * queues apart again.
1684 */
1685 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1686 cfq_mark_cfqq_split_coop(cfqq);
1687
7b14e3b5 1688 /*
6084cdda 1689 * store what was left of this slice, if the queue idled/timed out
7b14e3b5 1690 */
c553f8e3
SL
1691 if (timed_out) {
1692 if (cfq_cfqq_slice_new(cfqq))
ba5bd520 1693 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
c553f8e3
SL
1694 else
1695 cfqq->slice_resid = cfqq->slice_end - jiffies;
7b679138
JA
1696 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1697 }
7b14e3b5 1698
e5ff082e 1699 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
dae739eb 1700
f04a6424
VG
1701 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1702 cfq_del_cfqq_rr(cfqd, cfqq);
1703
edd75ffd 1704 cfq_resort_rr_list(cfqd, cfqq);
7b14e3b5
JA
1705
1706 if (cfqq == cfqd->active_queue)
1707 cfqd->active_queue = NULL;
1708
1709 if (cfqd->active_cic) {
11a3122f 1710 put_io_context(cfqd->active_cic->icq.ioc);
7b14e3b5
JA
1711 cfqd->active_cic = NULL;
1712 }
7b14e3b5
JA
1713}
1714
e5ff082e 1715static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
7b14e3b5
JA
1716{
1717 struct cfq_queue *cfqq = cfqd->active_queue;
1718
1719 if (cfqq)
e5ff082e 1720 __cfq_slice_expired(cfqd, cfqq, timed_out);
7b14e3b5
JA
1721}
1722
498d3aa2
JA
1723/*
1724 * Get next queue for service. Unless we have a queue preemption,
1725 * we'll simply select the first cfqq in the service tree.
1726 */
6d048f53 1727static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
22e2c507 1728{
c0324a02 1729 struct cfq_rb_root *service_tree =
cdb16e8f 1730 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
65b32a57 1731 cfqd->serving_type);
d9e7620e 1732
f04a6424
VG
1733 if (!cfqd->rq_queued)
1734 return NULL;
1735
1fa8f6d6
VG
1736 /* There is nothing to dispatch */
1737 if (!service_tree)
1738 return NULL;
c0324a02
CZ
1739 if (RB_EMPTY_ROOT(&service_tree->rb))
1740 return NULL;
1741 return cfq_rb_first(service_tree);
6d048f53
JA
1742}
1743
f04a6424
VG
1744static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1745{
25fb5169 1746 struct cfq_group *cfqg;
f04a6424
VG
1747 struct cfq_queue *cfqq;
1748 int i, j;
1749 struct cfq_rb_root *st;
1750
1751 if (!cfqd->rq_queued)
1752 return NULL;
1753
25fb5169
VG
1754 cfqg = cfq_get_next_cfqg(cfqd);
1755 if (!cfqg)
1756 return NULL;
1757
f04a6424
VG
1758 for_each_cfqg_st(cfqg, i, j, st)
1759 if ((cfqq = cfq_rb_first(st)) != NULL)
1760 return cfqq;
1761 return NULL;
1762}
1763
498d3aa2
JA
1764/*
1765 * Get and set a new active queue for service.
1766 */
a36e71f9
JA
1767static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1768 struct cfq_queue *cfqq)
6d048f53 1769{
e00ef799 1770 if (!cfqq)
a36e71f9 1771 cfqq = cfq_get_next_queue(cfqd);
6d048f53 1772
22e2c507 1773 __cfq_set_active_queue(cfqd, cfqq);
3b18152c 1774 return cfqq;
22e2c507
JA
1775}
1776
d9e7620e
JA
1777static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1778 struct request *rq)
1779{
83096ebf
TH
1780 if (blk_rq_pos(rq) >= cfqd->last_position)
1781 return blk_rq_pos(rq) - cfqd->last_position;
d9e7620e 1782 else
83096ebf 1783 return cfqd->last_position - blk_rq_pos(rq);
d9e7620e
JA
1784}
1785
b2c18e1e 1786static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e9ce335d 1787 struct request *rq)
6d048f53 1788{
e9ce335d 1789 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
6d048f53
JA
1790}
1791
a36e71f9
JA
1792static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1793 struct cfq_queue *cur_cfqq)
1794{
f2d1f0ae 1795 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
a36e71f9
JA
1796 struct rb_node *parent, *node;
1797 struct cfq_queue *__cfqq;
1798 sector_t sector = cfqd->last_position;
1799
1800 if (RB_EMPTY_ROOT(root))
1801 return NULL;
1802
1803 /*
1804 * First, if we find a request starting at the end of the last
1805 * request, choose it.
1806 */
f2d1f0ae 1807 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
a36e71f9
JA
1808 if (__cfqq)
1809 return __cfqq;
1810
1811 /*
1812 * If the exact sector wasn't found, the parent of the NULL leaf
1813 * will contain the closest sector.
1814 */
1815 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
e9ce335d 1816 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
1817 return __cfqq;
1818
2e46e8b2 1819 if (blk_rq_pos(__cfqq->next_rq) < sector)
a36e71f9
JA
1820 node = rb_next(&__cfqq->p_node);
1821 else
1822 node = rb_prev(&__cfqq->p_node);
1823 if (!node)
1824 return NULL;
1825
1826 __cfqq = rb_entry(node, struct cfq_queue, p_node);
e9ce335d 1827 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
1828 return __cfqq;
1829
1830 return NULL;
1831}
1832
1833/*
1834 * cfqd - obvious
1835 * cur_cfqq - passed in so that we don't decide that the current queue is
1836 * closely cooperating with itself.
1837 *
1838 * So, basically we're assuming that cur_cfqq has dispatched at least
1839 * one request, and that cfqd->last_position reflects a position on the disk
1840 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
1841 * assumption.
1842 */
1843static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
b3b6d040 1844 struct cfq_queue *cur_cfqq)
6d048f53 1845{
a36e71f9
JA
1846 struct cfq_queue *cfqq;
1847
39c01b21
DS
1848 if (cfq_class_idle(cur_cfqq))
1849 return NULL;
e6c5bc73
JM
1850 if (!cfq_cfqq_sync(cur_cfqq))
1851 return NULL;
1852 if (CFQQ_SEEKY(cur_cfqq))
1853 return NULL;
1854
b9d8f4c7
GJ
1855 /*
1856 * Don't search priority tree if it's the only queue in the group.
1857 */
1858 if (cur_cfqq->cfqg->nr_cfqq == 1)
1859 return NULL;
1860
6d048f53 1861 /*
d9e7620e
JA
1862	 * We should notice if some of the queues are cooperating, e.g.
1863	 * working closely on the same area of the disk. In that case,
1864	 * we can group them together and not waste time idling.
6d048f53 1865 */
a36e71f9
JA
1866 cfqq = cfqq_close(cfqd, cur_cfqq);
1867 if (!cfqq)
1868 return NULL;
1869
8682e1f1
VG
1870 /* If new queue belongs to different cfq_group, don't choose it */
1871 if (cur_cfqq->cfqg != cfqq->cfqg)
1872 return NULL;
1873
df5fe3e8
JM
1874 /*
1875 * It only makes sense to merge sync queues.
1876 */
1877 if (!cfq_cfqq_sync(cfqq))
1878 return NULL;
e6c5bc73
JM
1879 if (CFQQ_SEEKY(cfqq))
1880 return NULL;
df5fe3e8 1881
c0324a02
CZ
1882 /*
1883 * Do not merge queues of different priority classes
1884 */
1885 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1886 return NULL;
1887
a36e71f9 1888 return cfqq;
6d048f53
JA
1889}
1890
a6d44e98
CZ
1891/*
1892 * Determine whether we should enforce idle window for this queue.
1893 */
1894
1895static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1896{
1897 enum wl_prio_t prio = cfqq_prio(cfqq);
718eee05 1898 struct cfq_rb_root *service_tree = cfqq->service_tree;
a6d44e98 1899
f04a6424
VG
1900 BUG_ON(!service_tree);
1901 BUG_ON(!service_tree->count);
1902
b6508c16
VG
1903 if (!cfqd->cfq_slice_idle)
1904 return false;
1905
a6d44e98
CZ
1906 /* We never do for idle class queues. */
1907 if (prio == IDLE_WORKLOAD)
1908 return false;
1909
1910 /* We do for queues that were marked with idle window flag. */
3c764b7a
SL
1911 if (cfq_cfqq_idle_window(cfqq) &&
1912 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
a6d44e98
CZ
1913 return true;
1914
1915 /*
1916	 * Otherwise, we only idle if the queue is the last one
1917	 * in its service tree.
1918 */
f5f2b6ce
SL
1919 if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
1920 !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
c1e44756 1921 return true;
b1ffe737
DS
1922 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1923 service_tree->count);
c1e44756 1924 return false;
a6d44e98
CZ
1925}
1926
6d048f53 1927static void cfq_arm_slice_timer(struct cfq_data *cfqd)
22e2c507 1928{
1792669c 1929 struct cfq_queue *cfqq = cfqd->active_queue;
c5869807 1930 struct cfq_io_cq *cic;
80bdf0c7 1931 unsigned long sl, group_idle = 0;
7b14e3b5 1932
a68bbddb 1933 /*
f7d7b7a7
JA
1934 * SSD device without seek penalty, disable idling. But only do so
1935 * for devices that support queuing, otherwise we still have a problem
1936 * with sync vs async workloads.
a68bbddb 1937 */
f7d7b7a7 1938 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
a68bbddb
JA
1939 return;
1940
dd67d051 1941 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
6d048f53 1942 WARN_ON(cfq_cfqq_slice_new(cfqq));
22e2c507
JA
1943
1944 /*
1945 * idle is disabled, either manually or by past process history
1946 */
80bdf0c7
VG
1947 if (!cfq_should_idle(cfqd, cfqq)) {
1948 /* no queue idling. Check for group idling */
1949 if (cfqd->cfq_group_idle)
1950 group_idle = cfqd->cfq_group_idle;
1951 else
1952 return;
1953 }
6d048f53 1954
7b679138 1955 /*
8e550632 1956 * still active requests from this queue, don't idle
7b679138 1957 */
8e550632 1958 if (cfqq->dispatched)
7b679138
JA
1959 return;
1960
22e2c507
JA
1961 /*
1962 * task has exited, don't wait
1963 */
206dc69b 1964 cic = cfqd->active_cic;
c5869807 1965 if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
6d048f53
JA
1966 return;
1967
355b659c
CZ
1968 /*
1969 * If our average think time is larger than the remaining time
1970 * slice, then don't idle. This avoids overrunning the allotted
1971 * time slice.
1972 */
383cd721
SL
1973 if (sample_valid(cic->ttime.ttime_samples) &&
1974 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
fd16d263 1975 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
383cd721 1976 cic->ttime.ttime_mean);
355b659c 1977 return;
b1ffe737 1978 }
355b659c 1979
80bdf0c7
VG
1980 /* There are other queues in the group, don't do group idle */
1981 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1982 return;
1983
3b18152c 1984 cfq_mark_cfqq_wait_request(cfqq);
22e2c507 1985
80bdf0c7
VG
1986 if (group_idle)
1987 sl = cfqd->cfq_group_idle;
1988 else
1989 sl = cfqd->cfq_slice_idle;
206dc69b 1990
7b14e3b5 1991 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
c1768268
TH
1992 cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
1993 &blkio_policy_cfq);
80bdf0c7
VG
1994 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1995 group_idle ? 1 : 0);
1da177e4
LT
1996}
1997
498d3aa2
JA
1998/*
1999 * Move request from internal lists to the request queue dispatch list.
2000 */
165125e1 2001static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1da177e4 2002{
3ed9a296 2003 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 2004 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 2005
7b679138
JA
2006 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2007
06d21886 2008 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
5380a101 2009 cfq_remove_request(rq);
6d048f53 2010 cfqq->dispatched++;
80bdf0c7 2011 (RQ_CFQG(rq))->dispatched++;
5380a101 2012 elv_dispatch_sort(q, rq);
3ed9a296 2013
53c583d2 2014 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
c4e7893e 2015 cfqq->nr_sectors += blk_rq_sectors(rq);
0381411e 2016 cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
c1768268
TH
2017 &blkio_policy_cfq, blk_rq_bytes(rq),
2018 rq_data_dir(rq), rq_is_sync(rq));
1da177e4
LT
2019}
2020
2021/*
2022 * return expired entry, or NULL to just start from scratch in rbtree
2023 */
febffd61 2024static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1da177e4 2025{
30996f40 2026 struct request *rq = NULL;
1da177e4 2027
3b18152c 2028 if (cfq_cfqq_fifo_expire(cfqq))
1da177e4 2029 return NULL;
cb887411
JA
2030
2031 cfq_mark_cfqq_fifo_expire(cfqq);
2032
89850f7e
JA
2033 if (list_empty(&cfqq->fifo))
2034 return NULL;
1da177e4 2035
89850f7e 2036 rq = rq_entry_fifo(cfqq->fifo.next);
30996f40 2037 if (time_before(jiffies, rq_fifo_time(rq)))
7b679138 2038 rq = NULL;
1da177e4 2039
30996f40 2040 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
6d048f53 2041 return rq;
1da177e4
LT
2042}
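
cfq_check_fifo() above hands back the head of the FIFO at most once per slice, and only if that request's deadline (stamped at insert time in cfq_insert_request() further below) has already passed. The standalone userspace sketch that follows is not part of the scheduler; it mimics the deadline test using the same wrap-safe comparison idea as the kernel's time_before(), and the 125-jiffy expiry is an assumed example value.

#include <stdio.h>

typedef unsigned long jiffies_t;

/* wrap-safe "a before b" comparison, like the kernel's time_before() */
static int before(jiffies_t a, jiffies_t b)
{
        return (long)(a - b) < 0;
}

struct fake_rq {
        jiffies_t fifo_time;    /* set at insert: now + fifo_expire[dir] */
};

/* Returns the rq if its FIFO deadline has passed, NULL otherwise. */
static struct fake_rq *check_fifo(struct fake_rq *head, jiffies_t now)
{
        if (!head)
                return NULL;
        if (before(now, head->fifo_time))
                return NULL;            /* deadline not reached yet */
        return head;
}

int main(void)
{
        jiffies_t now = 1000;
        struct fake_rq rq = { .fifo_time = 1000 + 125 };  /* assumed expiry */

        printf("at t=%lu: %s\n", now,
               check_fifo(&rq, now) ? "expired" : "not yet");
        printf("at t=%lu: %s\n", now + 200,
               check_fifo(&rq, now + 200) ? "expired" : "not yet");
        return 0;
}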
2043
22e2c507
JA
2044static inline int
2045cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2046{
2047 const int base_rq = cfqd->cfq_slice_async_rq;
1da177e4 2048
22e2c507 2049 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1da177e4 2050
b9f8ce05 2051 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
1da177e4
LT
2052}
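
For reference, cfq_prio_to_maxrq() above is a straight linear scale: 2 * cfq_slice_async_rq * (IOPRIO_BE_NR - ioprio), so higher-priority queues get a larger per-slice request budget. The minimal userspace sketch below is not part of the scheduler; it assumes IOPRIO_BE_NR == 8 and the default cfq_slice_async_rq of 2 seen earlier in this file.

#include <stdio.h>

#define IOPRIO_BE_NR 8                          /* assumed, as in this file */
static const int cfq_slice_async_rq = 2;        /* assumed default          */

static int prio_to_maxrq(int ioprio)
{
        /* same formula as cfq_prio_to_maxrq(): higher prio => bigger budget */
        return 2 * cfq_slice_async_rq * (IOPRIO_BE_NR - ioprio);
}

int main(void)
{
        for (int prio = 0; prio < IOPRIO_BE_NR; prio++)
                printf("ioprio %d -> max %d requests per slice\n",
                       prio, prio_to_maxrq(prio));
        return 0;
}

With those defaults the budget runs from 32 requests at ioprio 0 down to 4 at ioprio 7.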
2053
df5fe3e8
JM
2054/*
2055 * Must be called with the queue_lock held.
2056 */
2057static int cfqq_process_refs(struct cfq_queue *cfqq)
2058{
2059 int process_refs, io_refs;
2060
2061 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
30d7b944 2062 process_refs = cfqq->ref - io_refs;
df5fe3e8
JM
2063 BUG_ON(process_refs < 0);
2064 return process_refs;
2065}
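
cfqq_process_refs() above separates process references from per-request references: every allocated rq holds one reference on the queue, so subtracting the allocated counts from the total leaves the number of processes still attached. The standalone sketch below mirrors that accounting with a simplified stand-in struct (hypothetical fields, not the real cfq_queue).

#include <assert.h>
#include <stdio.h>

/* simplified stand-in for the fields cfqq_process_refs() looks at */
struct fake_cfqq {
        int ref;              /* total references               */
        int allocated[2];     /* rqs allocated: [READ], [WRITE] */
};

static int process_refs(const struct fake_cfqq *q)
{
        int io_refs = q->allocated[0] + q->allocated[1];
        int prefs = q->ref - io_refs;

        assert(prefs >= 0);   /* mirrors the BUG_ON() above */
        return prefs;
}

int main(void)
{
        struct fake_cfqq q = { .ref = 5, .allocated = { 2, 1 } };

        /* 5 total refs, 3 held by in-flight rqs => 2 process refs */
        printf("process refs: %d\n", process_refs(&q));
        return 0;
}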
2066
2067static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2068{
e6c5bc73 2069 int process_refs, new_process_refs;
df5fe3e8
JM
2070 struct cfq_queue *__cfqq;
2071
c10b61f0
JM
2072 /*
2073 * If there are no process references on the new_cfqq, then it is
2074 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2075 * chain may have dropped their last reference (not just their
2076 * last process reference).
2077 */
2078 if (!cfqq_process_refs(new_cfqq))
2079 return;
2080
df5fe3e8
JM
2081 /* Avoid a circular list and skip interim queue merges */
2082 while ((__cfqq = new_cfqq->new_cfqq)) {
2083 if (__cfqq == cfqq)
2084 return;
2085 new_cfqq = __cfqq;
2086 }
2087
2088 process_refs = cfqq_process_refs(cfqq);
c10b61f0 2089 new_process_refs = cfqq_process_refs(new_cfqq);
df5fe3e8
JM
2090 /*
2091 * If the process for the cfqq has gone away, there is no
2092 * sense in merging the queues.
2093 */
c10b61f0 2094 if (process_refs == 0 || new_process_refs == 0)
df5fe3e8
JM
2095 return;
2096
e6c5bc73
JM
2097 /*
2098 * Merge in the direction of the lesser amount of work.
2099 */
e6c5bc73
JM
2100 if (new_process_refs >= process_refs) {
2101 cfqq->new_cfqq = new_cfqq;
30d7b944 2102 new_cfqq->ref += process_refs;
e6c5bc73
JM
2103 } else {
2104 new_cfqq->new_cfqq = cfqq;
30d7b944 2105 cfqq->ref += new_process_refs;
e6c5bc73 2106 }
df5fe3e8
JM
2107}
2108
cdb16e8f 2109static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
65b32a57 2110 struct cfq_group *cfqg, enum wl_prio_t prio)
718eee05
CZ
2111{
2112 struct cfq_queue *queue;
2113 int i;
2114 bool key_valid = false;
2115 unsigned long lowest_key = 0;
2116 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2117
65b32a57
VG
2118 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2119 /* select the one with lowest rb_key */
2120 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
718eee05
CZ
2121 if (queue &&
2122 (!key_valid || time_before(queue->rb_key, lowest_key))) {
2123 lowest_key = queue->rb_key;
2124 cur_best = i;
2125 key_valid = true;
2126 }
2127 }
2128
2129 return cur_best;
2130}
2131
cdb16e8f 2132static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
718eee05 2133{
718eee05
CZ
2134 unsigned slice;
2135 unsigned count;
cdb16e8f 2136 struct cfq_rb_root *st;
58ff82f3 2137 unsigned group_slice;
e4ea0c16 2138 enum wl_prio_t original_prio = cfqd->serving_prio;
1fa8f6d6 2139
718eee05 2140 /* Choose next priority. RT > BE > IDLE */
58ff82f3 2141 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
718eee05 2142 cfqd->serving_prio = RT_WORKLOAD;
58ff82f3 2143 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
718eee05
CZ
2144 cfqd->serving_prio = BE_WORKLOAD;
2145 else {
2146 cfqd->serving_prio = IDLE_WORKLOAD;
2147 cfqd->workload_expires = jiffies + 1;
2148 return;
2149 }
2150
e4ea0c16
SL
2151 if (original_prio != cfqd->serving_prio)
2152 goto new_workload;
2153
718eee05
CZ
2154 /*
2155 * For RT and BE, we have to choose also the type
2156 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2157 * expiration time
2158 */
65b32a57 2159 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
cdb16e8f 2160 count = st->count;
718eee05
CZ
2161
2162 /*
65b32a57 2163 * check workload expiration, and that we still have other queues ready
718eee05 2164 */
65b32a57 2165 if (count && !time_after(jiffies, cfqd->workload_expires))
718eee05
CZ
2166 return;
2167
e4ea0c16 2168new_workload:
718eee05
CZ
2169 /* otherwise select new workload type */
2170 cfqd->serving_type =
65b32a57
VG
2171 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2172 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
cdb16e8f 2173 count = st->count;
718eee05
CZ
2174
2175 /*
2176 * the workload slice is computed as a fraction of target latency
2177 * proportional to the number of queues in that workload, over
2178 * all the queues in the same priority class
2179 */
58ff82f3
VG
2180 group_slice = cfq_group_slice(cfqd, cfqg);
2181
2182 slice = group_slice * count /
2183 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2184 cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
718eee05 2185
f26bd1f0
VG
2186 if (cfqd->serving_type == ASYNC_WORKLOAD) {
2187 unsigned int tmp;
2188
2189 /*
2190 * Async queues are currently system wide. Just taking
2191	 * the proportion of queues within the same group will lead to a higher
2192	 * async ratio system wide, as the root group generally has a
2193	 * higher weight. A more accurate approach would be to
2194	 * calculate the system-wide async/sync ratio.
2195 */
2196 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2197 tmp = tmp/cfqd->busy_queues;
2198 slice = min_t(unsigned, slice, tmp);
2199
718eee05
CZ
2200 /* async workload slice is scaled down according to
2201 * the sync/async slice ratio. */
2202 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
f26bd1f0 2203 } else
718eee05
CZ
2204 /* sync workload slice is at least 2 * cfq_slice_idle */
2205 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2206
2207 slice = max_t(unsigned, slice, CFQ_MIN_TT);
b1ffe737 2208 cfq_log(cfqd, "workload slice:%d", slice);
718eee05
CZ
2209 cfqd->workload_expires = jiffies + slice;
2210}
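
The slice computed above is a fraction of the group slice proportional to the number of queues on the chosen service tree, scaled down further for async workloads and floored for sync ones. The standalone numeric sketch below covers only the sync branch; it assumes HZ=1000, a single busy group (so group_slice equals the 300 ms target latency), and folds busy_queues_avg into a single busy-queue count for simplicity.

#include <stdio.h>

#define HZ 1000         /* assumed */

int main(void)
{
        unsigned target_latency = HZ * 3 / 10;   /* 300 jiffies */
        unsigned slice_idle     = HZ / 125;      /* 8 jiffies   */
        unsigned min_tt         = 2;             /* CFQ_MIN_TT  */

        unsigned group_slice  = target_latency;  /* only one group busy       */
        unsigned count        = 3;               /* queues on this service tree */
        unsigned busy_in_prio = 5;               /* busy queues in this class  */

        unsigned slice = group_slice * count / busy_in_prio;

        /* sync workload: at least 2 * slice_idle, never below CFQ_MIN_TT */
        if (slice < 2 * slice_idle)
                slice = 2 * slice_idle;
        if (slice < min_tt)
                slice = min_tt;

        printf("workload slice: %u jiffies\n", slice);  /* prints 180 */
        return 0;
}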
2211
1fa8f6d6
VG
2212static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2213{
2214 struct cfq_rb_root *st = &cfqd->grp_service_tree;
25bc6b07 2215 struct cfq_group *cfqg;
1fa8f6d6
VG
2216
2217 if (RB_EMPTY_ROOT(&st->rb))
2218 return NULL;
25bc6b07 2219 cfqg = cfq_rb_first_group(st);
25bc6b07
VG
2220 update_min_vdisktime(st);
2221 return cfqg;
1fa8f6d6
VG
2222}
2223
cdb16e8f
VG
2224static void cfq_choose_cfqg(struct cfq_data *cfqd)
2225{
1fa8f6d6
VG
2226 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2227
2228 cfqd->serving_group = cfqg;
dae739eb
VG
2229
2230 /* Restore the workload type data */
2231 if (cfqg->saved_workload_slice) {
2232 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2233 cfqd->serving_type = cfqg->saved_workload;
2234 cfqd->serving_prio = cfqg->saved_serving_prio;
66ae2919
GJ
2235 } else
2236 cfqd->workload_expires = jiffies - 1;
2237
1fa8f6d6 2238 choose_service_tree(cfqd, cfqg);
cdb16e8f
VG
2239}
2240
22e2c507 2241/*
498d3aa2
JA
2242 * Select a queue for service. If we have a current active queue,
2243 * check whether to continue servicing it, or retrieve and set a new one.
22e2c507 2244 */
1b5ed5e1 2245static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1da177e4 2246{
a36e71f9 2247 struct cfq_queue *cfqq, *new_cfqq = NULL;
1da177e4 2248
22e2c507
JA
2249 cfqq = cfqd->active_queue;
2250 if (!cfqq)
2251 goto new_queue;
1da177e4 2252
f04a6424
VG
2253 if (!cfqd->rq_queued)
2254 return NULL;
c244bb50
VG
2255
2256 /*
2257 * We were waiting for group to get backlogged. Expire the queue
2258 */
2259 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2260 goto expire;
2261
22e2c507 2262 /*
6d048f53 2263 * The active queue has run out of time, expire it and select new.
22e2c507 2264 */
7667aa06
VG
2265 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2266 /*
2267	 * If the slice had not expired at the completion of the last request,
2268	 * we might not have turned on the wait_busy flag. Don't expire
2269	 * the queue yet. Allow the group to get backlogged.
2270	 *
2271	 * The very fact that we have used the slice means we
2272 * have been idling all along on this queue and it should be
2273 * ok to wait for this request to complete.
2274 */
82bbbf28
VG
2275 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2276 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2277 cfqq = NULL;
7667aa06 2278 goto keep_queue;
82bbbf28 2279 } else
80bdf0c7 2280 goto check_group_idle;
7667aa06 2281 }
1da177e4 2282
22e2c507 2283 /*
6d048f53
JA
2284 * The active queue has requests and isn't expired, allow it to
2285 * dispatch.
22e2c507 2286 */
dd67d051 2287 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 2288 goto keep_queue;
6d048f53 2289
a36e71f9
JA
2290 /*
2291 * If another queue has a request waiting within our mean seek
2292 * distance, let it run. The expire code will check for close
2293 * cooperators and put the close queue at the front of the service
df5fe3e8 2294 * tree. If possible, merge the expiring queue with the new cfqq.
a36e71f9 2295 */
b3b6d040 2296 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
df5fe3e8
JM
2297 if (new_cfqq) {
2298 if (!cfqq->new_cfqq)
2299 cfq_setup_merge(cfqq, new_cfqq);
a36e71f9 2300 goto expire;
df5fe3e8 2301 }
a36e71f9 2302
6d048f53
JA
2303 /*
2304 * No requests pending. If the active queue still has requests in
2305 * flight or is idling for a new request, allow either of these
2306 * conditions to happen (or time out) before selecting a new queue.
2307 */
80bdf0c7
VG
2308 if (timer_pending(&cfqd->idle_slice_timer)) {
2309 cfqq = NULL;
2310 goto keep_queue;
2311 }
2312
8e1ac665
SL
2313 /*
2314 * This is a deep seek queue, but the device is much faster than
2315	 * the queue can deliver; don't idle.
2316	 */
2317 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2318 (cfq_cfqq_slice_new(cfqq) ||
2319 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2320 cfq_clear_cfqq_deep(cfqq);
2321 cfq_clear_cfqq_idle_window(cfqq);
2322 }
2323
80bdf0c7
VG
2324 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2325 cfqq = NULL;
2326 goto keep_queue;
2327 }
2328
2329 /*
2330 * If group idle is enabled and there are requests dispatched from
2331 * this group, wait for requests to complete.
2332 */
2333check_group_idle:
7700fc4f
SL
2334 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2335 cfqq->cfqg->dispatched &&
2336 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
caaa5f9f
JA
2337 cfqq = NULL;
2338 goto keep_queue;
22e2c507
JA
2339 }
2340
3b18152c 2341expire:
e5ff082e 2342 cfq_slice_expired(cfqd, 0);
3b18152c 2343new_queue:
718eee05
CZ
2344 /*
2345 * Current queue expired. Check if we have to switch to a new
2346 * service tree
2347 */
2348 if (!new_cfqq)
cdb16e8f 2349 cfq_choose_cfqg(cfqd);
718eee05 2350
a36e71f9 2351 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
22e2c507 2352keep_queue:
3b18152c 2353 return cfqq;
22e2c507
JA
2354}
2355
febffd61 2356static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
d9e7620e
JA
2357{
2358 int dispatched = 0;
2359
2360 while (cfqq->next_rq) {
2361 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2362 dispatched++;
2363 }
2364
2365 BUG_ON(!list_empty(&cfqq->fifo));
f04a6424
VG
2366
2367 /* By default cfqq is not expired if it is empty. Do it explicitly */
e5ff082e 2368 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
d9e7620e
JA
2369 return dispatched;
2370}
2371
498d3aa2
JA
2372/*
2373 * Drain our current requests. Used for barriers and when switching
2374 * io schedulers on-the-fly.
2375 */
d9e7620e 2376static int cfq_forced_dispatch(struct cfq_data *cfqd)
1b5ed5e1 2377{
0871714e 2378 struct cfq_queue *cfqq;
d9e7620e 2379 int dispatched = 0;
cdb16e8f 2380
3440c49f 2381 /* Expire the timeslice of the current active queue first */
e5ff082e 2382 cfq_slice_expired(cfqd, 0);
3440c49f
DS
2383 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2384 __cfq_set_active_queue(cfqd, cfqq);
f04a6424 2385 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3440c49f 2386 }
1b5ed5e1 2387
1b5ed5e1
TH
2388 BUG_ON(cfqd->busy_queues);
2389
6923715a 2390 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1b5ed5e1
TH
2391 return dispatched;
2392}
2393
abc3c744
SL
2394static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2395 struct cfq_queue *cfqq)
2396{
2397 /* the queue hasn't finished any request, can't estimate */
2398 if (cfq_cfqq_slice_new(cfqq))
c1e44756 2399 return true;
abc3c744
SL
2400 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2401 cfqq->slice_end))
c1e44756 2402 return true;
abc3c744 2403
c1e44756 2404 return false;
abc3c744
SL
2405}
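
The estimate above treats the slice as nearly exhausted when the requests already dispatched, at one idle window each, would run past slice_end. A tiny standalone sketch of that check with assumed values (HZ=1000); it is an illustration, not scheduler code.

#include <stdio.h>

int main(void)
{
        unsigned long now = 5000, slice_end = 5040;     /* jiffies, assumed */
        unsigned long slice_idle = 8;                   /* HZ/125           */
        unsigned int dispatched = 6;

        /* same test as cfq_slice_used_soon() for a non-new slice */
        int used_soon = now + slice_idle * dispatched > slice_end;

        printf("slice %s soon\n",
               used_soon ? "will be used up" : "still has room");
        return 0;
}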
2406
0b182d61 2407static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2f5cb738 2408{
2f5cb738 2409 unsigned int max_dispatch;
22e2c507 2410
5ad531db
JA
2411 /*
2412 * Drain async requests before we start sync IO
2413 */
53c583d2 2414 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
0b182d61 2415 return false;
5ad531db 2416
2f5cb738
JA
2417 /*
2418 * If this is an async queue and we have sync IO in flight, let it wait
2419 */
53c583d2 2420 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
0b182d61 2421 return false;
2f5cb738 2422
abc3c744 2423 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2f5cb738
JA
2424 if (cfq_class_idle(cfqq))
2425 max_dispatch = 1;
b4878f24 2426
2f5cb738
JA
2427 /*
2428 * Does this cfqq already have too much IO in flight?
2429 */
2430 if (cfqq->dispatched >= max_dispatch) {
ef8a41df 2431 bool promote_sync = false;
2f5cb738
JA
2432 /*
2433 * idle queue must always only have a single IO in flight
2434 */
3ed9a296 2435 if (cfq_class_idle(cfqq))
0b182d61 2436 return false;
3ed9a296 2437
ef8a41df 2438 /*
c4ade94f
LS
2439 * If there is only one sync queue
2440 * we can ignore async queue here and give the sync
ef8a41df
SL
2441	 * queue no dispatch limit. The reason is that a sync queue can
2442	 * preempt an async queue, so limiting the sync queue doesn't make
2443	 * sense. This is useful for the aiostress test.
2444 */
c4ade94f
LS
2445 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2446 promote_sync = true;
ef8a41df 2447
2f5cb738
JA
2448 /*
2449 * We have other queues, don't allow more IO from this one
2450 */
ef8a41df
SL
2451 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2452 !promote_sync)
0b182d61 2453 return false;
9ede209e 2454
365722bb 2455 /*
474b18cc 2456 * Sole queue user, no limit
365722bb 2457 */
ef8a41df 2458 if (cfqd->busy_queues == 1 || promote_sync)
abc3c744
SL
2459 max_dispatch = -1;
2460 else
2461 /*
2462 * Normally we start throttling cfqq when cfq_quantum/2
2463 * requests have been dispatched. But we can drive
2464 * deeper queue depths at the beginning of slice
2465 * subjected to upper limit of cfq_quantum.
2466 * */
2467 max_dispatch = cfqd->cfq_quantum;
8e296755
JA
2468 }
2469
2470 /*
2471 * Async queues must wait a bit before being allowed dispatch.
2472 * We also ramp up the dispatch depth gradually for async IO,
2473 * based on the last sync IO we serviced
2474 */
963b72fc 2475 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
573412b2 2476 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
8e296755 2477 unsigned int depth;
365722bb 2478
61f0c1dc 2479 depth = last_sync / cfqd->cfq_slice[1];
e00c54c3
JA
2480 if (!depth && !cfqq->dispatched)
2481 depth = 1;
8e296755
JA
2482 if (depth < max_dispatch)
2483 max_dispatch = depth;
2f5cb738 2484 }
3ed9a296 2485
0b182d61
JA
2486 /*
2487 * If we're below the current max, allow a dispatch
2488 */
2489 return cfqq->dispatched < max_dispatch;
2490}
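
The async throttling in cfq_may_dispatch() above starts every queue at cfq_quantum/2 and lets the depth grow with the age of the last delayed sync completion: depth = (jiffies - last_delayed_sync) / cfq_slice[1]. The standalone sketch below reproduces that ramp, assuming HZ=1000 and the defaults quantum=8 and slice_sync=HZ/10; the cfqq->dispatched check is simplified away.

#include <stdio.h>

#define HZ 1000         /* assumed */

static unsigned async_max_dispatch(unsigned long since_last_sync)
{
        unsigned quantum = 8;                           /* assumed default   */
        unsigned slice_sync = HZ / 10;                  /* cfq_slice[1]      */
        unsigned max_dispatch = quantum / 2 > 1 ? quantum / 2 : 1;
        unsigned depth = since_last_sync / slice_sync;  /* grows as sync IO ages */

        if (!depth)
                depth = 1;      /* simplification: nothing dispatched yet */
        if (depth < max_dispatch)
                max_dispatch = depth;
        return max_dispatch;
}

int main(void)
{
        unsigned long ages[] = { 0, 150, 450, 1000 };   /* jiffies since last sync IO */

        for (int i = 0; i < 4; i++)
                printf("%4lu jiffies since sync -> async depth %u\n",
                       ages[i], async_max_dispatch(ages[i]));
        return 0;
}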
2491
2492/*
2493 * Dispatch a request from cfqq, moving them to the request queue
2494 * dispatch list.
2495 */
2496static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2497{
2498 struct request *rq;
2499
2500 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2501
2502 if (!cfq_may_dispatch(cfqd, cfqq))
2503 return false;
2504
2505 /*
2506 * follow expired path, else get first next available
2507 */
2508 rq = cfq_check_fifo(cfqq);
2509 if (!rq)
2510 rq = cfqq->next_rq;
2511
2512 /*
2513 * insert request into driver dispatch list
2514 */
2515 cfq_dispatch_insert(cfqd->queue, rq);
2516
2517 if (!cfqd->active_cic) {
c5869807 2518 struct cfq_io_cq *cic = RQ_CIC(rq);
0b182d61 2519
c5869807 2520 atomic_long_inc(&cic->icq.ioc->refcount);
0b182d61
JA
2521 cfqd->active_cic = cic;
2522 }
2523
2524 return true;
2525}
2526
2527/*
2528 * Find the cfqq that we need to service and move a request from that to the
2529 * dispatch list
2530 */
2531static int cfq_dispatch_requests(struct request_queue *q, int force)
2532{
2533 struct cfq_data *cfqd = q->elevator->elevator_data;
2534 struct cfq_queue *cfqq;
2535
2536 if (!cfqd->busy_queues)
2537 return 0;
2538
2539 if (unlikely(force))
2540 return cfq_forced_dispatch(cfqd);
2541
2542 cfqq = cfq_select_queue(cfqd);
2543 if (!cfqq)
8e296755
JA
2544 return 0;
2545
2f5cb738 2546 /*
0b182d61 2547 * Dispatch a request from this cfqq, if it is allowed
2f5cb738 2548 */
0b182d61
JA
2549 if (!cfq_dispatch_request(cfqd, cfqq))
2550 return 0;
2551
2f5cb738 2552 cfqq->slice_dispatch++;
b029195d 2553 cfq_clear_cfqq_must_dispatch(cfqq);
22e2c507 2554
2f5cb738
JA
2555 /*
2556 * expire an async queue immediately if it has used up its slice. idle
2557 * queue always expire after 1 dispatch round.
2558 */
2559 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2560 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2561 cfq_class_idle(cfqq))) {
2562 cfqq->slice_end = jiffies + 1;
e5ff082e 2563 cfq_slice_expired(cfqd, 0);
1da177e4
LT
2564 }
2565
b217a903 2566 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2f5cb738 2567 return 1;
1da177e4
LT
2568}
2569
1da177e4 2570/*
5e705374
JA
2571 * task holds one reference to the queue, dropped when task exits. each rq
2572 * in-flight on this queue also holds a reference, dropped when rq is freed.
1da177e4 2573 *
b1c35769 2574 * Each cfq queue took a reference on the parent group. Drop it now.
1da177e4
LT
2575 * queue lock must be held here.
2576 */
2577static void cfq_put_queue(struct cfq_queue *cfqq)
2578{
22e2c507 2579 struct cfq_data *cfqd = cfqq->cfqd;
0bbfeb83 2580 struct cfq_group *cfqg;
22e2c507 2581
30d7b944 2582 BUG_ON(cfqq->ref <= 0);
1da177e4 2583
30d7b944
SL
2584 cfqq->ref--;
2585 if (cfqq->ref)
1da177e4
LT
2586 return;
2587
7b679138 2588 cfq_log_cfqq(cfqd, cfqq, "put_queue");
1da177e4 2589 BUG_ON(rb_first(&cfqq->sort_list));
22e2c507 2590 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
b1c35769 2591 cfqg = cfqq->cfqg;
1da177e4 2592
28f95cbc 2593 if (unlikely(cfqd->active_queue == cfqq)) {
e5ff082e 2594 __cfq_slice_expired(cfqd, cfqq, 0);
23e018a1 2595 cfq_schedule_dispatch(cfqd);
28f95cbc 2596 }
22e2c507 2597
f04a6424 2598 BUG_ON(cfq_cfqq_on_rr(cfqq));
1da177e4 2599 kmem_cache_free(cfq_pool, cfqq);
1adaf3dd 2600 blkg_put(cfqg_to_blkg(cfqg));
1da177e4
LT
2601}
2602
d02a2c07 2603static void cfq_put_cooperator(struct cfq_queue *cfqq)
1da177e4 2604{
df5fe3e8
JM
2605 struct cfq_queue *__cfqq, *next;
2606
df5fe3e8
JM
2607 /*
2608 * If this queue was scheduled to merge with another queue, be
2609 * sure to drop the reference taken on that queue (and others in
2610 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
2611 */
2612 __cfqq = cfqq->new_cfqq;
2613 while (__cfqq) {
2614 if (__cfqq == cfqq) {
2615 WARN(1, "cfqq->new_cfqq loop detected\n");
2616 break;
2617 }
2618 next = __cfqq->new_cfqq;
2619 cfq_put_queue(__cfqq);
2620 __cfqq = next;
2621 }
d02a2c07
SL
2622}
2623
2624static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2625{
2626 if (unlikely(cfqq == cfqd->active_queue)) {
2627 __cfq_slice_expired(cfqd, cfqq, 0);
2628 cfq_schedule_dispatch(cfqd);
2629 }
2630
2631 cfq_put_cooperator(cfqq);
df5fe3e8 2632
89850f7e
JA
2633 cfq_put_queue(cfqq);
2634}
22e2c507 2635
9b84cacd
TH
2636static void cfq_init_icq(struct io_cq *icq)
2637{
2638 struct cfq_io_cq *cic = icq_to_cic(icq);
2639
2640 cic->ttime.last_end_request = jiffies;
2641}
2642
c5869807 2643static void cfq_exit_icq(struct io_cq *icq)
89850f7e 2644{
c5869807 2645 struct cfq_io_cq *cic = icq_to_cic(icq);
283287a5 2646 struct cfq_data *cfqd = cic_to_cfqd(cic);
4faa3c81 2647
ff6657c6
JA
2648 if (cic->cfqq[BLK_RW_ASYNC]) {
2649 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2650 cic->cfqq[BLK_RW_ASYNC] = NULL;
12a05732
AV
2651 }
2652
ff6657c6
JA
2653 if (cic->cfqq[BLK_RW_SYNC]) {
2654 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2655 cic->cfqq[BLK_RW_SYNC] = NULL;
12a05732 2656 }
89850f7e
JA
2657}
2658
fd0928df 2659static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
22e2c507
JA
2660{
2661 struct task_struct *tsk = current;
2662 int ioprio_class;
2663
3b18152c 2664 if (!cfq_cfqq_prio_changed(cfqq))
22e2c507
JA
2665 return;
2666
fd0928df 2667 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
22e2c507 2668 switch (ioprio_class) {
fe094d98
JA
2669 default:
2670 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2671 case IOPRIO_CLASS_NONE:
2672 /*
6d63c275 2673 * no prio set, inherit CPU scheduling settings
fe094d98
JA
2674 */
2675 cfqq->ioprio = task_nice_ioprio(tsk);
6d63c275 2676 cfqq->ioprio_class = task_nice_ioclass(tsk);
fe094d98
JA
2677 break;
2678 case IOPRIO_CLASS_RT:
2679 cfqq->ioprio = task_ioprio(ioc);
2680 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2681 break;
2682 case IOPRIO_CLASS_BE:
2683 cfqq->ioprio = task_ioprio(ioc);
2684 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2685 break;
2686 case IOPRIO_CLASS_IDLE:
2687 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2688 cfqq->ioprio = 7;
2689 cfq_clear_cfqq_idle_window(cfqq);
2690 break;
22e2c507
JA
2691 }
2692
2693 /*
2694 * keep track of original prio settings in case we have to temporarily
2695 * elevate the priority of this queue
2696 */
2697 cfqq->org_ioprio = cfqq->ioprio;
3b18152c 2698 cfq_clear_cfqq_prio_changed(cfqq);
22e2c507
JA
2699}
2700
c5869807 2701static void changed_ioprio(struct cfq_io_cq *cic)
22e2c507 2702{
bca4b914 2703 struct cfq_data *cfqd = cic_to_cfqd(cic);
478a82b0 2704 struct cfq_queue *cfqq;
35e6077c 2705
caaa5f9f
JA
2706 if (unlikely(!cfqd))
2707 return;
2708
ff6657c6 2709 cfqq = cic->cfqq[BLK_RW_ASYNC];
caaa5f9f
JA
2710 if (cfqq) {
2711 struct cfq_queue *new_cfqq;
c5869807 2712 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
ff6657c6 2713 GFP_ATOMIC);
caaa5f9f 2714 if (new_cfqq) {
ff6657c6 2715 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
caaa5f9f
JA
2716 cfq_put_queue(cfqq);
2717 }
22e2c507 2718 }
caaa5f9f 2719
ff6657c6 2720 cfqq = cic->cfqq[BLK_RW_SYNC];
caaa5f9f
JA
2721 if (cfqq)
2722 cfq_mark_cfqq_prio_changed(cfqq);
22e2c507
JA
2723}
2724
d5036d77 2725static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 2726 pid_t pid, bool is_sync)
d5036d77
JA
2727{
2728 RB_CLEAR_NODE(&cfqq->rb_node);
2729 RB_CLEAR_NODE(&cfqq->p_node);
2730 INIT_LIST_HEAD(&cfqq->fifo);
2731
30d7b944 2732 cfqq->ref = 0;
d5036d77
JA
2733 cfqq->cfqd = cfqd;
2734
2735 cfq_mark_cfqq_prio_changed(cfqq);
2736
2737 if (is_sync) {
2738 if (!cfq_class_idle(cfqq))
2739 cfq_mark_cfqq_idle_window(cfqq);
2740 cfq_mark_cfqq_sync(cfqq);
2741 }
2742 cfqq->pid = pid;
2743}
2744
24610333 2745#ifdef CONFIG_CFQ_GROUP_IOSCHED
c5869807 2746static void changed_cgroup(struct cfq_io_cq *cic)
24610333
VG
2747{
2748 struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
bca4b914 2749 struct cfq_data *cfqd = cic_to_cfqd(cic);
24610333
VG
2750 struct request_queue *q;
2751
2752 if (unlikely(!cfqd))
2753 return;
2754
2755 q = cfqd->queue;
2756
24610333
VG
2757 if (sync_cfqq) {
2758 /*
2759 * Drop reference to sync queue. A new sync queue will be
2760 * assigned in new group upon arrival of a fresh request.
2761 */
2762 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2763 cic_set_cfqq(cic, NULL, 1);
2764 cfq_put_queue(sync_cfqq);
2765 }
24610333 2766}
24610333
VG
2767#endif /* CONFIG_CFQ_GROUP_IOSCHED */
2768
22e2c507 2769static struct cfq_queue *
a6151c3a 2770cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
fd0928df 2771 struct io_context *ioc, gfp_t gfp_mask)
22e2c507 2772{
0a5a7d0e 2773 struct blkio_cgroup *blkcg;
22e2c507 2774 struct cfq_queue *cfqq, *new_cfqq = NULL;
c5869807 2775 struct cfq_io_cq *cic;
cdb16e8f 2776 struct cfq_group *cfqg;
22e2c507
JA
2777
2778retry:
2a7f1244
TH
2779 rcu_read_lock();
2780
0a5a7d0e
TH
2781 blkcg = task_blkio_cgroup(current);
2782
cd1604fa
TH
2783 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
2784
4ac845a2 2785 cic = cfq_cic_lookup(cfqd, ioc);
91fac317
VT
2786 /* cic always exists here */
2787 cfqq = cic_to_cfqq(cic, is_sync);
22e2c507 2788
6118b70b
JA
2789 /*
2790 * Always try a new alloc if we fell back to the OOM cfqq
2791 * originally, since it should just be a temporary situation.
2792 */
2793 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2794 cfqq = NULL;
22e2c507
JA
2795 if (new_cfqq) {
2796 cfqq = new_cfqq;
2797 new_cfqq = NULL;
2798 } else if (gfp_mask & __GFP_WAIT) {
2a7f1244 2799 rcu_read_unlock();
22e2c507 2800 spin_unlock_irq(cfqd->queue->queue_lock);
94f6030c 2801 new_cfqq = kmem_cache_alloc_node(cfq_pool,
6118b70b 2802 gfp_mask | __GFP_ZERO,
94f6030c 2803 cfqd->queue->node);
22e2c507 2804 spin_lock_irq(cfqd->queue->queue_lock);
6118b70b
JA
2805 if (new_cfqq)
2806 goto retry;
22e2c507 2807 } else {
94f6030c
CL
2808 cfqq = kmem_cache_alloc_node(cfq_pool,
2809 gfp_mask | __GFP_ZERO,
2810 cfqd->queue->node);
22e2c507
JA
2811 }
2812
6118b70b
JA
2813 if (cfqq) {
2814 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2815 cfq_init_prio_data(cfqq, ioc);
cdb16e8f 2816 cfq_link_cfqq_cfqg(cfqq, cfqg);
6118b70b
JA
2817 cfq_log_cfqq(cfqd, cfqq, "alloced");
2818 } else
2819 cfqq = &cfqd->oom_cfqq;
22e2c507
JA
2820 }
2821
2822 if (new_cfqq)
2823 kmem_cache_free(cfq_pool, new_cfqq);
2824
2a7f1244 2825 rcu_read_unlock();
22e2c507
JA
2826 return cfqq;
2827}
2828
c2dea2d1
VT
2829static struct cfq_queue **
2830cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2831{
fe094d98 2832 switch (ioprio_class) {
c2dea2d1
VT
2833 case IOPRIO_CLASS_RT:
2834 return &cfqd->async_cfqq[0][ioprio];
2835 case IOPRIO_CLASS_BE:
2836 return &cfqd->async_cfqq[1][ioprio];
2837 case IOPRIO_CLASS_IDLE:
2838 return &cfqd->async_idle_cfqq;
2839 default:
2840 BUG();
2841 }
2842}
2843
15c31be4 2844static struct cfq_queue *
a6151c3a 2845cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
15c31be4
JA
2846 gfp_t gfp_mask)
2847{
fd0928df
JA
2848 const int ioprio = task_ioprio(ioc);
2849 const int ioprio_class = task_ioprio_class(ioc);
c2dea2d1 2850 struct cfq_queue **async_cfqq = NULL;
15c31be4
JA
2851 struct cfq_queue *cfqq = NULL;
2852
c2dea2d1
VT
2853 if (!is_sync) {
2854 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2855 cfqq = *async_cfqq;
2856 }
2857
6118b70b 2858 if (!cfqq)
fd0928df 2859 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
15c31be4
JA
2860
2861 /*
2862 * pin the queue now that it's allocated, scheduler exit will prune it
2863 */
c2dea2d1 2864 if (!is_sync && !(*async_cfqq)) {
30d7b944 2865 cfqq->ref++;
c2dea2d1 2866 *async_cfqq = cfqq;
15c31be4
JA
2867 }
2868
30d7b944 2869 cfqq->ref++;
15c31be4
JA
2870 return cfqq;
2871}
2872
22e2c507 2873static void
383cd721 2874__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
1da177e4 2875{
383cd721
SL
2876 unsigned long elapsed = jiffies - ttime->last_end_request;
2877 elapsed = min(elapsed, 2UL * slice_idle);
db3b5848 2878
383cd721
SL
2879 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
2880 ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
2881 ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
2882}
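
The update above is a fixed-point exponentially weighted moving average: samples and totals decay by 7/8 on every call, the new sample enters with weight 256, and the mean is the rounded quotient of the two. The standalone sketch below simply reproduces that arithmetic on a few fabricated think-time samples (in jiffies); it is an illustration, not scheduler code.

#include <stdio.h>

struct ttime {
        unsigned long samples;
        unsigned long total;
        unsigned long mean;
};

/* Same fixed-point EWMA as __cfq_update_io_thinktime(): 7/8 decay,
 * new sample weighted by 256, rounded mean. */
static void update_thinktime(struct ttime *t, unsigned long elapsed)
{
        t->samples = (7 * t->samples + 256) / 8;
        t->total   = (7 * t->total + 256 * elapsed) / 8;
        t->mean    = (t->total + 128) / t->samples;
}

int main(void)
{
        struct ttime t = { 0, 0, 0 };
        unsigned long samples[] = { 4, 4, 12, 2, 4 };   /* fabricated think times */

        for (int i = 0; i < 5; i++) {
                update_thinktime(&t, samples[i]);
                printf("after sample %lu: mean=%lu\n", samples[i], t.mean);
        }
        return 0;
}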
2883
2884static void
2885cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 2886 struct cfq_io_cq *cic)
383cd721 2887{
f5f2b6ce 2888 if (cfq_cfqq_sync(cfqq)) {
383cd721 2889 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
f5f2b6ce
SL
2890 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
2891 cfqd->cfq_slice_idle);
2892 }
7700fc4f
SL
2893#ifdef CONFIG_CFQ_GROUP_IOSCHED
2894 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
2895#endif
22e2c507 2896}
1da177e4 2897
206dc69b 2898static void
b2c18e1e 2899cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
6d048f53 2900 struct request *rq)
206dc69b 2901{
3dde36dd 2902 sector_t sdist = 0;
41647e7a 2903 sector_t n_sec = blk_rq_sectors(rq);
3dde36dd
CZ
2904 if (cfqq->last_request_pos) {
2905 if (cfqq->last_request_pos < blk_rq_pos(rq))
2906 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
2907 else
2908 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
2909 }
206dc69b 2910
3dde36dd 2911 cfqq->seek_history <<= 1;
41647e7a
CZ
2912 if (blk_queue_nonrot(cfqd->queue))
2913 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
2914 else
2915 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
206dc69b 2916}
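
seek_history above acts as a 32-bit shift register: each request shifts in one bit, set when the request looked seeky (a large jump on rotational media, a small transfer on non-rotational media), and CFQQ_SEEKY() later classifies the queue by counting the set bits. The standalone sketch below mimics only the rotational case; the distance threshold and the popcount cut-off are assumptions mirroring the defines earlier in this file, and __builtin_popcount stands in for the kernel's hweight32.

#include <stdio.h>
#include <stdint.h>

#define SEEK_THR (8 * 100)      /* assumed, in sectors */

struct seek_state {
        uint32_t history;
        uint64_t last_pos;
};

static void record_request(struct seek_state *s, uint64_t pos)
{
        uint64_t dist = pos > s->last_pos ? pos - s->last_pos
                                          : s->last_pos - pos;

        s->history <<= 1;
        s->history |= (dist > SEEK_THR);   /* 1 = this request was a seek */
        s->last_pos = pos;
}

static int is_seeky(const struct seek_state *s)
{
        /* seeky if more than 1/8 of the 32-bit window is set */
        return __builtin_popcount(s->history) > 32 / 8;
}

int main(void)
{
        struct seek_state s = { 0, 0 };
        uint64_t sectors[] = { 0, 8, 16, 100000, 24, 200000,
                               300000, 32, 400000, 500000 };

        for (int i = 0; i < 10; i++)
                record_request(&s, sectors[i]);

        printf("queue is %s\n", is_seeky(&s) ? "seeky" : "sequential");
        return 0;
}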
1da177e4 2917
22e2c507
JA
2918/*
2919 * Disable idle window if the process thinks too long or seeks so much that
2920 * it doesn't matter
2921 */
2922static void
2923cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 2924 struct cfq_io_cq *cic)
22e2c507 2925{
7b679138 2926 int old_idle, enable_idle;
1be92f2f 2927
0871714e
JA
2928 /*
2929 * Don't idle for async or idle io prio class
2930 */
2931 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1be92f2f
JA
2932 return;
2933
c265a7f4 2934 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1da177e4 2935
76280aff
CZ
2936 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
2937 cfq_mark_cfqq_deep(cfqq);
2938
749ef9f8
CZ
2939 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
2940 enable_idle = 0;
c5869807
TH
2941 else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
2942 !cfqd->cfq_slice_idle ||
2943 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
22e2c507 2944 enable_idle = 0;
383cd721
SL
2945 else if (sample_valid(cic->ttime.ttime_samples)) {
2946 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
22e2c507
JA
2947 enable_idle = 0;
2948 else
2949 enable_idle = 1;
1da177e4
LT
2950 }
2951
7b679138
JA
2952 if (old_idle != enable_idle) {
2953 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
2954 if (enable_idle)
2955 cfq_mark_cfqq_idle_window(cfqq);
2956 else
2957 cfq_clear_cfqq_idle_window(cfqq);
2958 }
22e2c507 2959}
1da177e4 2960
22e2c507
JA
2961/*
2962 * Check if new_cfqq should preempt the currently active queue. Return false
2963 * if not or if we aren't sure; a true return will cause a preempt.
2964 */
a6151c3a 2965static bool
22e2c507 2966cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
5e705374 2967 struct request *rq)
22e2c507 2968{
6d048f53 2969 struct cfq_queue *cfqq;
22e2c507 2970
6d048f53
JA
2971 cfqq = cfqd->active_queue;
2972 if (!cfqq)
a6151c3a 2973 return false;
22e2c507 2974
6d048f53 2975 if (cfq_class_idle(new_cfqq))
a6151c3a 2976 return false;
22e2c507
JA
2977
2978 if (cfq_class_idle(cfqq))
a6151c3a 2979 return true;
1e3335de 2980
875feb63
DS
2981 /*
2982 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
2983 */
2984 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
2985 return false;
2986
374f84ac
JA
2987 /*
2988 * if the new request is sync, but the currently running queue is
2989 * not, let the sync request have priority.
2990 */
5e705374 2991 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
a6151c3a 2992 return true;
1e3335de 2993
8682e1f1
VG
2994 if (new_cfqq->cfqg != cfqq->cfqg)
2995 return false;
2996
2997 if (cfq_slice_used(cfqq))
2998 return true;
2999
3000 /* Allow preemption only if we are idling on sync-noidle tree */
3001 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3002 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3003 new_cfqq->service_tree->count == 2 &&
3004 RB_EMPTY_ROOT(&cfqq->sort_list))
3005 return true;
3006
b53d1ed7
JA
3007 /*
3008 * So both queues are sync. Let the new request get disk time if
3009 * it's a metadata request and the current queue is doing regular IO.
3010 */
65299a3b 3011 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
b53d1ed7
JA
3012 return true;
3013
3a9a3f6c
DS
3014 /*
3015 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3016 */
3017 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
a6151c3a 3018 return true;
3a9a3f6c 3019
d2d59e18
SL
3020 /* An idle queue should not be idle now for some reason */
3021 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3022 return true;
3023
1e3335de 3024 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
a6151c3a 3025 return false;
1e3335de
JA
3026
3027 /*
3028	 * if this request is as good as one we would expect from the
3029 * current cfqq, let it preempt
3030 */
e9ce335d 3031 if (cfq_rq_close(cfqd, cfqq, rq))
a6151c3a 3032 return true;
1e3335de 3033
a6151c3a 3034 return false;
22e2c507
JA
3035}
3036
3037/*
3038 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3039 * let it have half of its nominal slice.
3040 */
3041static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3042{
df0793ab
SL
3043 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3044
7b679138 3045 cfq_log_cfqq(cfqd, cfqq, "preempt");
df0793ab 3046 cfq_slice_expired(cfqd, 1);
22e2c507 3047
f8ae6e3e
SL
3048 /*
3049 * workload type is changed, don't save slice, otherwise preempt
3050 * doesn't happen
3051 */
df0793ab 3052 if (old_type != cfqq_type(cfqq))
f8ae6e3e
SL
3053 cfqq->cfqg->saved_workload_slice = 0;
3054
bf572256
JA
3055 /*
3056	 * Put the new queue at the front of the current list,
3057 * so we know that it will be selected next.
3058 */
3059 BUG_ON(!cfq_cfqq_on_rr(cfqq));
edd75ffd
JA
3060
3061 cfq_service_tree_add(cfqd, cfqq, 1);
eda5e0c9 3062
62a37f6b
JT
3063 cfqq->slice_end = 0;
3064 cfq_mark_cfqq_slice_new(cfqq);
22e2c507
JA
3065}
3066
22e2c507 3067/*
5e705374 3068 * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507
JA
3069 * something we should do about it
3070 */
3071static void
5e705374
JA
3072cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3073 struct request *rq)
22e2c507 3074{
c5869807 3075 struct cfq_io_cq *cic = RQ_CIC(rq);
12e9fddd 3076
45333d5a 3077 cfqd->rq_queued++;
65299a3b
CH
3078 if (rq->cmd_flags & REQ_PRIO)
3079 cfqq->prio_pending++;
374f84ac 3080
383cd721 3081 cfq_update_io_thinktime(cfqd, cfqq, cic);
b2c18e1e 3082 cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a1
JA
3083 cfq_update_idle_window(cfqd, cfqq, cic);
3084
b2c18e1e 3085 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507
JA
3086
3087 if (cfqq == cfqd->active_queue) {
3088 /*
b029195d
JA
3089 * Remember that we saw a request from this process, but
3090 * don't start queuing just yet. Otherwise we risk seeing lots
3091 * of tiny requests, because we disrupt the normal plugging
d6ceb25e
JA
3092 * and merging. If the request is already larger than a single
3093 * page, let it rip immediately. For that case we assume that
2d870722
JA
3094 * merging is already done. Ditto for a busy system that
3095	 * has other work pending; don't risk delaying until the
3096 * idle timer unplug to continue working.
22e2c507 3097 */
d6ceb25e 3098 if (cfq_cfqq_wait_request(cfqq)) {
2d870722
JA
3099 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3100 cfqd->busy_queues > 1) {
812df48d 3101 cfq_del_timer(cfqd, cfqq);
554554f6 3102 cfq_clear_cfqq_wait_request(cfqq);
24ecfbe2 3103 __blk_run_queue(cfqd->queue);
a11cdaa7 3104 } else {
e98ef89b 3105 cfq_blkiocg_update_idle_time_stats(
c1768268
TH
3106 cfqg_to_blkg(cfqq->cfqg),
3107 &blkio_policy_cfq);
bf791937 3108 cfq_mark_cfqq_must_dispatch(cfqq);
a11cdaa7 3109 }
d6ceb25e 3110 }
5e705374 3111 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507
JA
3112 /*
3113 * not the active queue - expire current slice if it is
3114	 * idle and has expired its mean thinktime, or this new queue
3a9a3f6c
DS
3115 * has some old slice time left and is of higher priority or
3116 * this new queue is RT and the current one is BE
22e2c507
JA
3117 */
3118 cfq_preempt_queue(cfqd, cfqq);
24ecfbe2 3119 __blk_run_queue(cfqd->queue);
22e2c507 3120 }
1da177e4
LT
3121}
3122
165125e1 3123static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4 3124{
b4878f24 3125 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 3126 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 3127
7b679138 3128 cfq_log_cfqq(cfqd, cfqq, "insert_request");
c5869807 3129 cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
1da177e4 3130
30996f40 3131 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
22e2c507 3132 list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3d 3133 cfq_add_rq_rb(rq);
0381411e 3134 cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
c1768268 3135 &blkio_policy_cfq,
0381411e
TH
3136 cfqg_to_blkg(cfqd->serving_group),
3137 rq_data_dir(rq), rq_is_sync(rq));
5e705374 3138 cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4
LT
3139}
3140
45333d5a
AC
3141/*
3142 * Update hw_tag based on peak queue depth over 50 samples under
3143 * sufficient load.
3144 */
3145static void cfq_update_hw_tag(struct cfq_data *cfqd)
3146{
1a1238a7
SL
3147 struct cfq_queue *cfqq = cfqd->active_queue;
3148
53c583d2
CZ
3149 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3150 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
e459dd08
CZ
3151
3152 if (cfqd->hw_tag == 1)
3153 return;
45333d5a
AC
3154
3155 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
53c583d2 3156 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
45333d5a
AC
3157 return;
3158
1a1238a7
SL
3159 /*
3160	 * If the active queue doesn't have enough requests and can idle, cfq might not
3161 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3162 * case
3163 */
3164 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3165 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
53c583d2 3166 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
1a1238a7
SL
3167 return;
3168
45333d5a
AC
3169 if (cfqd->hw_tag_samples++ < 50)
3170 return;
3171
e459dd08 3172 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
45333d5a
AC
3173 cfqd->hw_tag = 1;
3174 else
3175 cfqd->hw_tag = 0;
45333d5a
AC
3176}
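
cfq_update_hw_tag() above simply tracks the peak number of requests seen in the driver and, after 50 qualifying samples, declares the device queueing-capable if that peak reached CFQ_HW_QUEUE_MIN. The standalone sketch below keeps that estimator but drops the low-load and active-queue filtering for brevity; the threshold of 5 is an assumption mirroring the define earlier in this file.

#include <stdio.h>

#define HW_QUEUE_MIN 5          /* assumed, mirrors CFQ_HW_QUEUE_MIN */

struct hw_tag_est {
        int est_depth;          /* peak requests observed in the driver */
        int samples;
        int hw_tag;             /* -1 unknown, 0 no queueing, 1 queueing */
};

static void hw_tag_sample(struct hw_tag_est *e, int rq_in_driver)
{
        if (rq_in_driver > e->est_depth)
                e->est_depth = rq_in_driver;
        if (e->hw_tag == 1 || ++e->samples < 50)
                return;
        e->hw_tag = (e->est_depth >= HW_QUEUE_MIN) ? 1 : 0;
}

int main(void)
{
        struct hw_tag_est e = { 0, 0, -1 };

        for (int i = 0; i < 60; i++)
                hw_tag_sample(&e, i % 7);       /* depths 0..6 => peak 6 */

        printf("estimated depth %d, hw_tag %d\n", e.est_depth, e.hw_tag);
        return 0;
}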
3177
7667aa06
VG
3178static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3179{
c5869807 3180 struct cfq_io_cq *cic = cfqd->active_cic;
7667aa06 3181
02a8f01b
JT
3182 /* If the queue already has requests, don't wait */
3183 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3184 return false;
3185
7667aa06
VG
3186 /* If there are other queues in the group, don't wait */
3187 if (cfqq->cfqg->nr_cfqq > 1)
3188 return false;
3189
7700fc4f
SL
3190 /* the only queue in the group, but think time is big */
3191 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3192 return false;
3193
7667aa06
VG
3194 if (cfq_slice_used(cfqq))
3195 return true;
3196
3197 /* if slice left is less than think time, wait busy */
383cd721
SL
3198 if (cic && sample_valid(cic->ttime.ttime_samples)
3199 && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
7667aa06
VG
3200 return true;
3201
3202 /*
3203	 * If think time is less than a jiffy then ttime_mean=0 and the above
3204 * will not be true. It might happen that slice has not expired yet
3205 * but will expire soon (4-5 ns) during select_queue(). To cover the
3206 * case where think time is less than a jiffy, mark the queue wait
3207 * busy if only 1 jiffy is left in the slice.
3208 */
3209 if (cfqq->slice_end - jiffies == 1)
3210 return true;
3211
3212 return false;
3213}
3214
165125e1 3215static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4 3216{
5e705374 3217 struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f24 3218 struct cfq_data *cfqd = cfqq->cfqd;
5380a101 3219 const int sync = rq_is_sync(rq);
b4878f24 3220 unsigned long now;
1da177e4 3221
b4878f24 3222 now = jiffies;
33659ebb
CH
3223 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3224 !!(rq->cmd_flags & REQ_NOIDLE));
1da177e4 3225
45333d5a
AC
3226 cfq_update_hw_tag(cfqd);
3227
53c583d2 3228 WARN_ON(!cfqd->rq_in_driver);
6d048f53 3229 WARN_ON(!cfqq->dispatched);
53c583d2 3230 cfqd->rq_in_driver--;
6d048f53 3231 cfqq->dispatched--;
80bdf0c7 3232 (RQ_CFQG(rq))->dispatched--;
0381411e 3233 cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
c1768268
TH
3234 &blkio_policy_cfq, rq_start_time_ns(rq),
3235 rq_io_start_time_ns(rq), rq_data_dir(rq),
3236 rq_is_sync(rq));
1da177e4 3237
53c583d2 3238 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3ed9a296 3239
365722bb 3240 if (sync) {
f5f2b6ce
SL
3241 struct cfq_rb_root *service_tree;
3242
383cd721 3243 RQ_CIC(rq)->ttime.last_end_request = now;
f5f2b6ce
SL
3244
3245 if (cfq_cfqq_on_rr(cfqq))
3246 service_tree = cfqq->service_tree;
3247 else
3248 service_tree = service_tree_for(cfqq->cfqg,
3249 cfqq_prio(cfqq), cfqq_type(cfqq));
3250 service_tree->ttime.last_end_request = now;
573412b2
CZ
3251 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3252 cfqd->last_delayed_sync = now;
365722bb 3253 }
caaa5f9f 3254
7700fc4f
SL
3255#ifdef CONFIG_CFQ_GROUP_IOSCHED
3256 cfqq->cfqg->ttime.last_end_request = now;
3257#endif
3258
caaa5f9f
JA
3259 /*
3260 * If this is the active queue, check if it needs to be expired,
3261 * or if we want to idle in case it has no pending requests.
3262 */
3263 if (cfqd->active_queue == cfqq) {
a36e71f9
JA
3264 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3265
44f7c160
JA
3266 if (cfq_cfqq_slice_new(cfqq)) {
3267 cfq_set_prio_slice(cfqd, cfqq);
3268 cfq_clear_cfqq_slice_new(cfqq);
3269 }
f75edf2d
VG
3270
3271 /*
7667aa06
VG
3272		 * Should we wait for the next request to come in before we expire
3273		 * the queue?
f75edf2d 3274 */
7667aa06 3275 if (cfq_should_wait_busy(cfqd, cfqq)) {
80bdf0c7
VG
3276 unsigned long extend_sl = cfqd->cfq_slice_idle;
3277 if (!cfqd->cfq_slice_idle)
3278 extend_sl = cfqd->cfq_group_idle;
3279 cfqq->slice_end = jiffies + extend_sl;
f75edf2d 3280 cfq_mark_cfqq_wait_busy(cfqq);
b1ffe737 3281 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
f75edf2d
VG
3282 }
3283
a36e71f9 3284 /*
8e550632
CZ
3285 * Idling is not enabled on:
3286 * - expired queues
3287 * - idle-priority queues
3288 * - async queues
3290	 * - queues that still have requests queued
3290 * - when there is a close cooperator
a36e71f9 3291 */
0871714e 3292 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
e5ff082e 3293 cfq_slice_expired(cfqd, 1);
8e550632
CZ
3294 else if (sync && cfqq_empty &&
3295 !cfq_close_cooperator(cfqd, cfqq)) {
749ef9f8 3296 cfq_arm_slice_timer(cfqd);
8e550632 3297 }
caaa5f9f 3298 }
6d048f53 3299
53c583d2 3300 if (!cfqd->rq_in_driver)
23e018a1 3301 cfq_schedule_dispatch(cfqd);
1da177e4
LT
3302}
3303
89850f7e 3304static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507 3305{
1b379d8d 3306 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c 3307 cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507 3308 return ELV_MQUEUE_MUST;
3b18152c 3309 }
1da177e4 3310
22e2c507 3311 return ELV_MQUEUE_MAY;
22e2c507
JA
3312}
3313
165125e1 3314static int cfq_may_queue(struct request_queue *q, int rw)
22e2c507
JA
3315{
3316 struct cfq_data *cfqd = q->elevator->elevator_data;
3317 struct task_struct *tsk = current;
c5869807 3318 struct cfq_io_cq *cic;
22e2c507
JA
3319 struct cfq_queue *cfqq;
3320
3321 /*
3322 * don't force setup of a queue from here, as a call to may_queue
3323 * does not necessarily imply that a request actually will be queued.
3324	 * So just look up a possibly existing queue, or return 'may queue'
3325 * if that fails
3326 */
4ac845a2 3327 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
3328 if (!cic)
3329 return ELV_MQUEUE_MAY;
3330
b0b78f81 3331 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
22e2c507 3332 if (cfqq) {
c5869807 3333 cfq_init_prio_data(cfqq, cic->icq.ioc);
22e2c507 3334
89850f7e 3335 return __cfq_may_queue(cfqq);
22e2c507
JA
3336 }
3337
3338 return ELV_MQUEUE_MAY;
1da177e4
LT
3339}
3340
1da177e4
LT
3341/*
3342 * queue lock held here
3343 */
bb37b94c 3344static void cfq_put_request(struct request *rq)
1da177e4 3345{
5e705374 3346 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 3347
5e705374 3348 if (cfqq) {
22e2c507 3349 const int rw = rq_data_dir(rq);
1da177e4 3350
22e2c507
JA
3351 BUG_ON(!cfqq->allocated[rw]);
3352 cfqq->allocated[rw]--;
1da177e4 3353
7f1dc8a2 3354 /* Put down rq reference on cfqg */
1adaf3dd 3355 blkg_put(cfqg_to_blkg(RQ_CFQG(rq)));
a612fddf
TH
3356 rq->elv.priv[0] = NULL;
3357 rq->elv.priv[1] = NULL;
7f1dc8a2 3358
1da177e4
LT
3359 cfq_put_queue(cfqq);
3360 }
3361}
3362
df5fe3e8 3363static struct cfq_queue *
c5869807 3364cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
df5fe3e8
JM
3365 struct cfq_queue *cfqq)
3366{
3367 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3368 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d040 3369 cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8
JM
3370 cfq_put_queue(cfqq);
3371 return cic_to_cfqq(cic, 1);
3372}
3373
e6c5bc73
JM
3374/*
3375 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3376 * was the last process referring to said cfqq.
3377 */
3378static struct cfq_queue *
c5869807 3379split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
e6c5bc73
JM
3380{
3381 if (cfqq_process_refs(cfqq) == 1) {
e6c5bc73
JM
3382 cfqq->pid = current->pid;
3383 cfq_clear_cfqq_coop(cfqq);
ae54abed 3384 cfq_clear_cfqq_split_coop(cfqq);
e6c5bc73
JM
3385 return cfqq;
3386 }
3387
3388 cic_set_cfqq(cic, NULL, 1);
d02a2c07
SL
3389
3390 cfq_put_cooperator(cfqq);
3391
e6c5bc73
JM
3392 cfq_put_queue(cfqq);
3393 return NULL;
3394}
1da177e4 3395/*
22e2c507 3396 * Allocate cfq data structures associated with this request.
1da177e4 3397 */
22e2c507 3398static int
165125e1 3399cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
1da177e4
LT
3400{
3401 struct cfq_data *cfqd = q->elevator->elevator_data;
f1f8cc94 3402 struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
1da177e4 3403 const int rw = rq_data_dir(rq);
a6151c3a 3404 const bool is_sync = rq_is_sync(rq);
22e2c507 3405 struct cfq_queue *cfqq;
d705ae6b 3406 unsigned int changed;
1da177e4
LT
3407
3408 might_sleep_if(gfp_mask & __GFP_WAIT);
3409
216284c3 3410 spin_lock_irq(q->queue_lock);
f1f8cc94
TH
3411
3412 /* handle changed notifications */
d705ae6b
TH
3413 changed = icq_get_changed(&cic->icq);
3414 if (unlikely(changed & ICQ_IOPRIO_CHANGED))
3415 changed_ioprio(cic);
f1f8cc94 3416#ifdef CONFIG_CFQ_GROUP_IOSCHED
d705ae6b
TH
3417 if (unlikely(changed & ICQ_CGROUP_CHANGED))
3418 changed_cgroup(cic);
f1f8cc94 3419#endif
22e2c507 3420
e6c5bc73 3421new_queue:
91fac317 3422 cfqq = cic_to_cfqq(cic, is_sync);
32f2e807 3423 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
c5869807 3424 cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
91fac317 3425 cic_set_cfqq(cic, cfqq, is_sync);
df5fe3e8 3426 } else {
e6c5bc73
JM
3427 /*
3428 * If the queue was seeky for too long, break it apart.
3429 */
ae54abed 3430 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
e6c5bc73
JM
3431 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3432 cfqq = split_cfqq(cic, cfqq);
3433 if (!cfqq)
3434 goto new_queue;
3435 }
3436
df5fe3e8
JM
3437 /*
3438 * Check to see if this queue is scheduled to merge with
3439 * another, closely cooperating queue. The merging of
3440 * queues happens here as it must be done in process context.
3441 * The reference on new_cfqq was taken in merge_cfqqs.
3442 */
3443 if (cfqq->new_cfqq)
3444 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
91fac317 3445 }
1da177e4
LT
3446
3447 cfqq->allocated[rw]++;
1da177e4 3448
6fae9c25 3449 cfqq->ref++;
1adaf3dd 3450 blkg_get(cfqg_to_blkg(cfqq->cfqg));
a612fddf 3451 rq->elv.priv[0] = cfqq;
1adaf3dd 3452 rq->elv.priv[1] = cfqq->cfqg;
216284c3 3453 spin_unlock_irq(q->queue_lock);
5e705374 3454 return 0;
1da177e4
LT
3455}
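
Putting the two comments in cfq_set_request() together, the function settles on a queue in a fixed order: reuse the cached queue, break it apart if it has turned seeky again, then follow any scheduled merge. Here is a simplified, hedged userspace sketch of that ordering; mock_cfqq and select_queue are invented names, and the real code re-runs the lookup after a split rather than allocating directly.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_cfqq {
	bool coop;			/* was merged with a cooperator earlier */
	bool split_coop;		/* has turned seeky again, break it apart */
	struct mock_cfqq *new_cfqq;	/* scheduled merge target, if any */
};

/* Pick the queue a new request is charged to, in the same order as above:
 * split a re-seeky cooperator first, then honour a pending merge. */
static struct mock_cfqq *select_queue(struct mock_cfqq *cfqq)
{
	for (;;) {
		if (!cfqq)
			return calloc(1, sizeof(*cfqq));	/* "allocate a new cfqq" */

		if (cfqq->coop && cfqq->split_coop) {
			cfqq = NULL;	/* break apart and retry the lookup */
			continue;
		}

		if (cfqq->new_cfqq)
			return cfqq->new_cfqq;	/* hand the request to the cooperator */

		return cfqq;
	}
}

int main(void)
{
	struct mock_cfqq coop_target = { 0 };
	struct mock_cfqq seeky = { .coop = true, .split_coop = true };
	struct mock_cfqq merging = { .new_cfqq = &coop_target };
	struct mock_cfqq *picked = select_queue(&seeky);

	printf("seeky queue replaced: %s\n", picked != &seeky ? "yes" : "no");
	printf("merge redirected: %s\n",
	       select_queue(&merging) == &coop_target ? "yes" : "no");
	free(picked);
	return 0;
}
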
3456
65f27f38 3457static void cfq_kick_queue(struct work_struct *work)
22e2c507 3458{
65f27f38 3459 struct cfq_data *cfqd =
23e018a1 3460 container_of(work, struct cfq_data, unplug_work);
165125e1 3461 struct request_queue *q = cfqd->queue;
22e2c507 3462
40bb54d1 3463 spin_lock_irq(q->queue_lock);
24ecfbe2 3464 __blk_run_queue(cfqd->queue);
40bb54d1 3465 spin_unlock_irq(q->queue_lock);
22e2c507
JA
3466}
3467
3468/*
3469 * Timer running if the active_queue is currently idling inside its time slice
3470 */
3471static void cfq_idle_slice_timer(unsigned long data)
3472{
3473 struct cfq_data *cfqd = (struct cfq_data *) data;
3474 struct cfq_queue *cfqq;
3475 unsigned long flags;
3c6bd2f8 3476 int timed_out = 1;
22e2c507 3477
7b679138
JA
3478 cfq_log(cfqd, "idle timer fired");
3479
22e2c507
JA
3480 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3481
fe094d98
JA
3482 cfqq = cfqd->active_queue;
3483 if (cfqq) {
3c6bd2f8
JA
3484 timed_out = 0;
3485
b029195d
JA
3486 /*
3487 * We saw a request before the queue expired, let it through
3488 */
3489 if (cfq_cfqq_must_dispatch(cfqq))
3490 goto out_kick;
3491
22e2c507
JA
3492 /*
3493 * expired
3494 */
44f7c160 3495 if (cfq_slice_used(cfqq))
22e2c507
JA
3496 goto expire;
3497
3498 /*
 3499		 * only expire and reinvoke the request handler if there are
 3500		 * other queues with pending requests
3501 */
caaa5f9f 3502 if (!cfqd->busy_queues)
22e2c507 3503 goto out_cont;
22e2c507
JA
3504
3505 /*
3506 * not expired and it has a request pending, let it dispatch
3507 */
75e50984 3508 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 3509 goto out_kick;
76280aff
CZ
3510
3511 /*
 3512		 * The queue-depth ("deep") flag is cleared only when idling didn't succeed
3513 */
3514 cfq_clear_cfqq_deep(cfqq);
22e2c507
JA
3515 }
3516expire:
e5ff082e 3517 cfq_slice_expired(cfqd, timed_out);
22e2c507 3518out_kick:
23e018a1 3519 cfq_schedule_dispatch(cfqd);
22e2c507
JA
3520out_cont:
3521 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3522}
3523
3b18152c
JA
3524static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3525{
3526 del_timer_sync(&cfqd->idle_slice_timer);
23e018a1 3527 cancel_work_sync(&cfqd->unplug_work);
3b18152c 3528}
22e2c507 3529
c2dea2d1
VT
3530static void cfq_put_async_queues(struct cfq_data *cfqd)
3531{
3532 int i;
3533
3534 for (i = 0; i < IOPRIO_BE_NR; i++) {
3535 if (cfqd->async_cfqq[0][i])
3536 cfq_put_queue(cfqd->async_cfqq[0][i]);
3537 if (cfqd->async_cfqq[1][i])
3538 cfq_put_queue(cfqd->async_cfqq[1][i]);
c2dea2d1 3539 }
2389d1ef
ON
3540
3541 if (cfqd->async_idle_cfqq)
3542 cfq_put_queue(cfqd->async_idle_cfqq);
c2dea2d1
VT
3543}
3544
b374d18a 3545static void cfq_exit_queue(struct elevator_queue *e)
1da177e4 3546{
22e2c507 3547 struct cfq_data *cfqd = e->elevator_data;
165125e1 3548 struct request_queue *q = cfqd->queue;
56edf7d7 3549 bool wait = false;
22e2c507 3550
3b18152c 3551 cfq_shutdown_timer_wq(cfqd);
e2d74ac0 3552
d9ff4187 3553 spin_lock_irq(q->queue_lock);
e2d74ac0 3554
d9ff4187 3555 if (cfqd->active_queue)
e5ff082e 3556 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
e2d74ac0 3557
c2dea2d1 3558 cfq_put_async_queues(cfqd);
b1c35769 3559 cfq_release_cfq_groups(cfqd);
56edf7d7
VG
3560
3561 /*
3562 * If there are groups which we could not unlink from blkcg list,
3563 * wait for a rcu period for them to be freed.
3564 */
3565 if (cfqd->nr_blkcg_linked_grps)
3566 wait = true;
15c31be4 3567
d9ff4187 3568 spin_unlock_irq(q->queue_lock);
a90d742e
AV
3569
3570 cfq_shutdown_timer_wq(cfqd);
3571
56edf7d7
VG
3572 /*
 3573	 * Wait for an RCU grace period so that cfqg->blkg->key readers are done.
 3574	 * Do this wait only if there are other unlinked groups out
 3575	 * there. This can happen if the cgroup deletion path claimed the
 3576	 * responsibility of cleaning up a group before the queue cleanup code
 3577	 * got to the group.
 3578	 *
 3579	 * Do not call synchronize_rcu() unconditionally, as there are drivers
 3580	 * which create/delete request queues hundreds of times during scan/boot
3581 * and synchronize_rcu() can take significant time and slow down boot.
3582 */
3583 if (wait)
3584 synchronize_rcu();
2abae55f 3585
f51b802c
TH
3586#ifndef CONFIG_CFQ_GROUP_IOSCHED
3587 kfree(cfqd->root_group);
2abae55f 3588#endif
56edf7d7 3589 kfree(cfqd);
1da177e4
LT
3590}
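
The exit path above records, while still holding the queue lock, whether any groups were left linked, and only then pays for synchronize_rcu() after the lock is dropped. Below is a userspace analogue of that "flag under the lock, expensive wait outside it, and only if needed" shape, using a mutex and a sleep as stand-ins for the queue lock and the RCU grace period; none of the names are kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_linked_grps;		/* stand-in for cfqd->nr_blkcg_linked_grps */

/* Stand-in for synchronize_rcu(): a deliberately expensive global wait. */
static void expensive_global_sync(void)
{
	sleep(1);
}

static void exit_queue(void)
{
	bool wait = false;

	pthread_mutex_lock(&queue_lock);
	if (nr_linked_grps)		/* groups possibly still visible to readers */
		wait = true;
	pthread_mutex_unlock(&queue_lock);

	/* Pay for the grace period only when something was left linked. */
	if (wait)
		expensive_global_sync();
}

int main(void)
{
	exit_queue();			/* nothing linked: returns immediately */
	nr_linked_grps = 1;
	exit_queue();			/* sleeps once, mimicking the RCU wait */
	printf("done\n");
	return 0;
}
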
3591
b2fab5ac 3592static int cfq_init_queue(struct request_queue *q)
1da177e4
LT
3593{
3594 struct cfq_data *cfqd;
cd1604fa 3595 struct blkio_group *blkg __maybe_unused;
f51b802c 3596 int i;
1da177e4 3597
94f6030c 3598 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
a73f730d 3599 if (!cfqd)
b2fab5ac 3600 return -ENOMEM;
80b15c73 3601
f51b802c
TH
3602 cfqd->queue = q;
3603 q->elevator->elevator_data = cfqd;
3604
1fa8f6d6
VG
3605 /* Init root service tree */
3606 cfqd->grp_service_tree = CFQ_RB_ROOT;
3607
f51b802c 3608 /* Init root group and prefer root group over other groups by default */
25fb5169 3609#ifdef CONFIG_CFQ_GROUP_IOSCHED
f51b802c
TH
3610 rcu_read_lock();
3611 spin_lock_irq(q->queue_lock);
5624a4e4 3612
cd1604fa
TH
3613 blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_PROP,
3614 true);
3615 if (!IS_ERR(blkg))
0381411e 3616 cfqd->root_group = blkg_to_cfqg(blkg);
f51b802c
TH
3617
3618 spin_unlock_irq(q->queue_lock);
3619 rcu_read_unlock();
3620#else
3621 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
3622 GFP_KERNEL, cfqd->queue->node);
3623 if (cfqd->root_group)
3624 cfq_init_cfqg_base(cfqd->root_group);
3625#endif
3626 if (!cfqd->root_group) {
5624a4e4 3627 kfree(cfqd);
b2fab5ac 3628 return -ENOMEM;
5624a4e4
VG
3629 }
3630
f51b802c 3631 cfqd->root_group->weight = 2*BLKIO_WEIGHT_DEFAULT;
5624a4e4 3632
26a2ac00
JA
3633 /*
3634 * Not strictly needed (since RB_ROOT just clears the node and we
3635 * zeroed cfqd on alloc), but better be safe in case someone decides
3636 * to add magic to the rb code
3637 */
3638 for (i = 0; i < CFQ_PRIO_LISTS; i++)
3639 cfqd->prio_trees[i] = RB_ROOT;
3640
6118b70b
JA
3641 /*
3642 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3643 * Grab a permanent reference to it, so that the normal code flow
f51b802c
TH
3644 * will not attempt to free it. oom_cfqq is linked to root_group
3645 * but shouldn't hold a reference as it'll never be unlinked. Lose
3646 * the reference from linking right away.
6118b70b
JA
3647 */
3648 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
30d7b944 3649 cfqd->oom_cfqq.ref++;
1adaf3dd
TH
3650
3651 spin_lock_irq(q->queue_lock);
f51b802c 3652 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
1adaf3dd
TH
3653 blkg_put(cfqg_to_blkg(cfqd->root_group));
3654 spin_unlock_irq(q->queue_lock);
1da177e4 3655
22e2c507
JA
3656 init_timer(&cfqd->idle_slice_timer);
3657 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3658 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3659
23e018a1 3660 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
22e2c507 3661
1da177e4 3662 cfqd->cfq_quantum = cfq_quantum;
22e2c507
JA
3663 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3664 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
1da177e4
LT
3665 cfqd->cfq_back_max = cfq_back_max;
3666 cfqd->cfq_back_penalty = cfq_back_penalty;
22e2c507
JA
3667 cfqd->cfq_slice[0] = cfq_slice_async;
3668 cfqd->cfq_slice[1] = cfq_slice_sync;
3669 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3670 cfqd->cfq_slice_idle = cfq_slice_idle;
80bdf0c7 3671 cfqd->cfq_group_idle = cfq_group_idle;
963b72fc 3672 cfqd->cfq_latency = 1;
e459dd08 3673 cfqd->hw_tag = -1;
edc71131
CZ
3674 /*
 3675	 * We optimistically start by assuming sync ops weren't delayed in the
 3676	 * last second, in order to allow a larger depth for async operations.
3677 */
573412b2 3678 cfqd->last_delayed_sync = jiffies - HZ;
b2fab5ac 3679 return 0;
1da177e4
LT
3680}
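
The oom_cfqq handling in cfq_init_queue() relies on a simple refcount trick: take one permanent reference at init so the embedded fallback queue can never be freed by the ordinary put path. A tiny, hedged userspace illustration of that invariant with a mocked object and put helper:

#include <assert.h>
#include <stdio.h>

struct mock_obj {
	int ref;
	int freed;
};

static void obj_put(struct mock_obj *o)
{
	assert(o->ref > 0);
	if (--o->ref == 0)
		o->freed = 1;		/* stand-in for actually freeing the object */
}

int main(void)
{
	static struct mock_obj oom_queue;	/* embedded in scheduler data, must never be freed */

	oom_queue.ref++;		/* permanent reference taken at init time */

	oom_queue.ref++;		/* some request path takes ... */
	obj_put(&oom_queue);		/* ... and drops a reference */

	printf("freed=%d ref=%d\n", oom_queue.freed, oom_queue.ref);	/* freed=0 ref=1 */
	return 0;
}
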
3681
1da177e4
LT
3682/*
3683 * sysfs parts below -->
3684 */
1da177e4
LT
3685static ssize_t
3686cfq_var_show(unsigned int var, char *page)
3687{
3688 return sprintf(page, "%d\n", var);
3689}
3690
3691static ssize_t
3692cfq_var_store(unsigned int *var, const char *page, size_t count)
3693{
3694 char *p = (char *) page;
3695
3696 *var = simple_strtoul(p, &p, 10);
3697 return count;
3698}
3699
1da177e4 3700#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
b374d18a 3701static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1da177e4 3702{ \
3d1ab40f 3703 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
3704 unsigned int __data = __VAR; \
3705 if (__CONV) \
3706 __data = jiffies_to_msecs(__data); \
3707 return cfq_var_show(__data, (page)); \
3708}
3709SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
22e2c507
JA
3710SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3711SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
e572ec7e
AV
3712SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3713SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
22e2c507 3714SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
80bdf0c7 3715SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
22e2c507
JA
3716SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3717SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3718SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
963b72fc 3719SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
1da177e4
LT
3720#undef SHOW_FUNCTION
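
For readers tracing a tunable from sysfs back to the scheduler, this is what one instantiation of SHOW_FUNCTION() above expands to after preprocessing (modulo whitespace); __CONV=1 converts the jiffies-based field to milliseconds for display:

/* SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1) expands to: */
static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data = cfqd->cfq_slice_idle;
	if (1)
		__data = jiffies_to_msecs(__data);
	return cfq_var_show(__data, (page));
}
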
3721
3722#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
b374d18a 3723static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
1da177e4 3724{ \
3d1ab40f 3725 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
3726 unsigned int __data; \
3727 int ret = cfq_var_store(&__data, (page), count); \
3728 if (__data < (MIN)) \
3729 __data = (MIN); \
3730 else if (__data > (MAX)) \
3731 __data = (MAX); \
3732 if (__CONV) \
3733 *(__PTR) = msecs_to_jiffies(__data); \
3734 else \
3735 *(__PTR) = __data; \
3736 return ret; \
3737}
3738STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
fe094d98
JA
3739STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3740 UINT_MAX, 1);
3741STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3742 UINT_MAX, 1);
e572ec7e 3743STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
fe094d98
JA
3744STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3745 UINT_MAX, 0);
22e2c507 3746STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
80bdf0c7 3747STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
22e2c507
JA
3748STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3749STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
fe094d98
JA
3750STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3751 UINT_MAX, 0);
963b72fc 3752STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
1da177e4
LT
3753#undef STORE_FUNCTION
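
The store side is symmetric: the written value is parsed, clamped to [MIN, MAX], and converted from milliseconds back to jiffies when __CONV is set. As a concrete example, STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1) expands to roughly:

static ssize_t cfq_slice_idle_store(struct elevator_queue *e, const char *page, size_t count)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data;
	int ret = cfq_var_store(&__data, (page), count);
	if (__data < (0))
		__data = (0);
	else if (__data > (UINT_MAX))
		__data = (UINT_MAX);
	if (1)
		*(&cfqd->cfq_slice_idle) = msecs_to_jiffies(__data);
	else
		*(&cfqd->cfq_slice_idle) = __data;
	return ret;
}

Note that for an unsigned value the < (0) and > (UINT_MAX) comparisons can never be true; they are an artifact of reusing one macro for every tunable, and compilers may flag them as always-false.
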
3754
e572ec7e
AV
3755#define CFQ_ATTR(name) \
3756 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3757
3758static struct elv_fs_entry cfq_attrs[] = {
3759 CFQ_ATTR(quantum),
e572ec7e
AV
3760 CFQ_ATTR(fifo_expire_sync),
3761 CFQ_ATTR(fifo_expire_async),
3762 CFQ_ATTR(back_seek_max),
3763 CFQ_ATTR(back_seek_penalty),
3764 CFQ_ATTR(slice_sync),
3765 CFQ_ATTR(slice_async),
3766 CFQ_ATTR(slice_async_rq),
3767 CFQ_ATTR(slice_idle),
80bdf0c7 3768 CFQ_ATTR(group_idle),
963b72fc 3769 CFQ_ATTR(low_latency),
e572ec7e 3770 __ATTR_NULL
1da177e4
LT
3771};
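
Each CFQ_ATTR() entry is just the generic __ATTR() initializer tying a name to the generated show/store pair; CFQ_ATTR(slice_idle), for instance, expands to the initializer below. With cfq selected as the I/O scheduler these attributes typically surface as files under /sys/block/<dev>/queue/iosched/.

/* CFQ_ATTR(slice_idle) expands to: */
__ATTR(slice_idle, S_IRUGO|S_IWUSR, cfq_slice_idle_show, cfq_slice_idle_store)
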
3772
1da177e4
LT
3773static struct elevator_type iosched_cfq = {
3774 .ops = {
3775 .elevator_merge_fn = cfq_merge,
3776 .elevator_merged_fn = cfq_merged_request,
3777 .elevator_merge_req_fn = cfq_merged_requests,
da775265 3778 .elevator_allow_merge_fn = cfq_allow_merge,
812d4026 3779 .elevator_bio_merged_fn = cfq_bio_merged,
b4878f24 3780 .elevator_dispatch_fn = cfq_dispatch_requests,
1da177e4 3781 .elevator_add_req_fn = cfq_insert_request,
b4878f24 3782 .elevator_activate_req_fn = cfq_activate_request,
1da177e4 3783 .elevator_deactivate_req_fn = cfq_deactivate_request,
1da177e4 3784 .elevator_completed_req_fn = cfq_completed_request,
21183b07
JA
3785 .elevator_former_req_fn = elv_rb_former_request,
3786 .elevator_latter_req_fn = elv_rb_latter_request,
9b84cacd 3787 .elevator_init_icq_fn = cfq_init_icq,
7e5a8794 3788 .elevator_exit_icq_fn = cfq_exit_icq,
1da177e4
LT
3789 .elevator_set_req_fn = cfq_set_request,
3790 .elevator_put_req_fn = cfq_put_request,
3791 .elevator_may_queue_fn = cfq_may_queue,
3792 .elevator_init_fn = cfq_init_queue,
3793 .elevator_exit_fn = cfq_exit_queue,
3794 },
3d3c2379
TH
3795 .icq_size = sizeof(struct cfq_io_cq),
3796 .icq_align = __alignof__(struct cfq_io_cq),
3d1ab40f 3797 .elevator_attrs = cfq_attrs,
3d3c2379 3798 .elevator_name = "cfq",
1da177e4
LT
3799 .elevator_owner = THIS_MODULE,
3800};
3801
3e252066
VG
3802#ifdef CONFIG_CFQ_GROUP_IOSCHED
3803static struct blkio_policy_type blkio_policy_cfq = {
3804 .ops = {
0381411e 3805 .blkio_init_group_fn = cfq_init_blkio_group,
cd1604fa 3806 .blkio_link_group_fn = cfq_link_blkio_group,
3e252066 3807 .blkio_unlink_group_fn = cfq_unlink_blkio_group,
72e06c25 3808 .blkio_clear_queue_fn = cfq_clear_queue,
3e252066
VG
3809 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
3810 },
062a644d 3811 .plid = BLKIO_POLICY_PROP,
0381411e 3812 .pdata_size = sizeof(struct cfq_group),
3e252066 3813};
3e252066
VG
3814#endif
3815
1da177e4
LT
3816static int __init cfq_init(void)
3817{
3d3c2379
TH
3818 int ret;
3819
22e2c507
JA
3820 /*
3821 * could be 0 on HZ < 1000 setups
3822 */
3823 if (!cfq_slice_async)
3824 cfq_slice_async = 1;
3825 if (!cfq_slice_idle)
3826 cfq_slice_idle = 1;
3827
80bdf0c7
VG
3828#ifdef CONFIG_CFQ_GROUP_IOSCHED
3829 if (!cfq_group_idle)
3830 cfq_group_idle = 1;
3831#else
3832 cfq_group_idle = 0;
3833#endif
3d3c2379
TH
3834 cfq_pool = KMEM_CACHE(cfq_queue, 0);
3835 if (!cfq_pool)
1da177e4
LT
3836 return -ENOMEM;
3837
3d3c2379
TH
3838 ret = elv_register(&iosched_cfq);
3839 if (ret) {
3840 kmem_cache_destroy(cfq_pool);
3841 return ret;
3842 }
3d3c2379 3843
b95ada55 3844#ifdef CONFIG_CFQ_GROUP_IOSCHED
3e252066 3845 blkio_policy_register(&blkio_policy_cfq);
b95ada55 3846#endif
2fdd82bd 3847 return 0;
1da177e4
LT
3848}
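
The HZ guard at the top of cfq_init() exists because the idle defaults are computed with integer division and truncate to zero on low-HZ configurations. A quick standalone check of that arithmetic; the HZ values below are just common configuration choices, nothing is read from a running kernel.

#include <stdio.h>

int main(void)
{
	static const int hz_values[] = { 100, 250, 300, 1000 };

	for (unsigned int i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		int hz = hz_values[i];

		/* same integer arithmetic as the tunable defaults */
		printf("HZ=%4d: slice_async=%2d slice_idle=%d group_idle=%d\n",
		       hz, hz / 25, hz / 125, hz / 125);
	}
	return 0;
}

On an HZ=100 build, HZ/125 truncates to 0, which would silently disable idling; hence the fallback to a single jiffy in cfq_init().
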
3849
3850static void __exit cfq_exit(void)
3851{
b95ada55 3852#ifdef CONFIG_CFQ_GROUP_IOSCHED
3e252066 3853 blkio_policy_unregister(&blkio_policy_cfq);
b95ada55 3854#endif
1da177e4 3855 elv_unregister(&iosched_cfq);
3d3c2379 3856 kmem_cache_destroy(cfq_pool);
1da177e4
LT
3857}
3858
3859module_init(cfq_init);
3860module_exit(cfq_exit);
3861
3862MODULE_AUTHOR("Jens Axboe");
3863MODULE_LICENSE("GPL");
3864MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");