/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

/* blkio-related constants */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, so it has negative index
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total sectors transferred */
	struct blkg_stat		sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* Per-cgroup data */
struct cfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data pd;

	unsigned int weight;
	unsigned int leaf_weight;
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg.  This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active.  An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to.  This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1.  The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs.  leaf_weight is the weight of
	 * this cfqg against the child cfqgs.  For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */
	struct cfqg_stats dead_stats;	/* stats pushed from dead children */

	/* async queue for each priority case */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_serial_nr; /* the current blkcg serial */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position.  These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;
	unsigned int cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
static void cfq_put_queue(struct cfq_queue *cfqq);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

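/*
 * For example, CFQ_CFQQ_FNS(on_rr) below expands to three small helpers:
 * cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(), which
 * set, clear and test the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */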
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}									\

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static struct cfq_group_data
*cpd_to_cfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct cfq_group_data, pd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
{
	return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
{
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
	blkg_rwstat_merge(&to->serviced, &from->serviced);
	blkg_rwstat_merge(&to->merged, &from->merged);
	blkg_rwstat_merge(&to->service_time, &from->service_time);
	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
	blkg_stat_merge(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
	blkg_stat_merge(&to->dequeue, &from->dequeue);
	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_merge(&to->idle_time, &from->idle_time);
	blkg_stat_merge(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
	cfqg_stats_reset(&cfqg->stats);
	cfqg_stats_reset(&cfqg->dead_stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
				##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
			uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a performance bottleneck. In such cases
	 * switch to start providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
					struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	if (wl_class == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(&cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

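/*
 * Worked example (illustrative, assuming HZ == 1000): the default sync
 * slice cfq_slice_sync is HZ / 10 = 100 jiffies and CFQ_SLICE_SCALE is 5,
 * so each priority step is worth 100 / 5 = 20 jiffies.  A sync queue at
 * ioprio 0 then gets 100 + 20 * 4 = 180 jiffies, the default ioprio 4 gets
 * exactly 100, and ioprio 7 gets 100 + 20 * (4 - 7) = 40 jiffies.
 */
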
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1].  The
 * scaling is inversely proportional.
 *
 * scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(unsigned long charge,
				    unsigned int vfraction)
{
	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */

	/* charge / vfraction */
	c <<= CFQ_SERVICE_SHIFT;
	do_div(c, vfraction);
	return c;
}

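/*
 * Worked example (illustrative): a cfqg entitled to half of the device has
 * vfraction = (1 << CFQ_SERVICE_SHIFT) / 2 = 2048.  Charging it 10 jiffies
 * of disk time gives c = (10 << 24) / 2048 = 10 << 13, i.e. 20 jiffies in
 * CFQ_SERVICE_SHIFT fixed point; owning half the device doubles the
 * vdisktime charged per unit of service.
 */
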
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher
 * numbers, so that it quickly follows sudden increases and decreases slowly
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}

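/*
 * Worked example (illustrative): with cfq_hist_divisor == 4, mult is 3 and
 * round is 2.  If the stored average is 2 and 6 queues are now busy, the
 * new average is (3 * 6 + 2 + 2) / 4 = 5, jumping most of the way up in a
 * single update.  If the busy count then drops back to 2, the next update
 * only falls to (3 * 5 + 2 + 2) / 4 = 4.
 */
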
static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}

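/*
 * Worked example (illustrative, assuming HZ == 1000 and default tunables):
 * with a 300 jiffy target latency, a group owning the whole device gets
 * group_slice = 300.  If 4 sync BE queues are busy, expect_latency is
 * 4 * 100 = 400 > 300, so a 100 jiffy slice is shrunk to roughly
 * 100 * 300 / 400 = 75 jiffies, but never below low_slice.
 */
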
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}

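/*
 * Worked example (illustrative, default tunables): with cfq_back_max of
 * 16384 KiB, back_max is 32768 sectors.  If the head is at sector 10000,
 * a request at 9900 is 100 sectors behind and within back_max, so
 * d1 = 100 * cfq_back_penalty = 200, while a request at 10300 gets
 * d2 = 300.  The backward request still wins because 200 < 300.
 */
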
/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

/*
 * This has to be called only on activation of cfqg
 */
static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	if (cfqg->new_weight) {
		cfqg->weight = cfqg->new_weight;
		cfqg->new_weight = 0;
	}
}

static void
cfq_update_group_leaf_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	if (cfqg->new_leaf_weight) {
		cfqg->leaf_weight = cfqg->new_leaf_weight;
		cfqg->new_leaf_weight = 0;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* start with 1 */
	struct cfq_group *pos = cfqg;
	struct cfq_group *parent;
	bool propagate;

	/* add to the service tree */
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	/*
	 * Update leaf_weight.  We cannot update weight at this point
	 * because cfqg might already have been activated and is
	 * contributing its current weight to the parent's child_weight.
	 */
	cfq_update_group_leaf_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);

	/*
	 * Activate @cfqg and calculate the portion of vfraction @cfqg is
	 * entitled to.  vfraction is calculated by walking the tree
	 * towards the root calculating the fraction it has at each level.
	 * The compounded ratio is how much vfraction @cfqg owns.
	 *
	 * Start with the proportion tasks in this cfqg has against active
	 * children cfqgs - its leaf_weight against children_weight.
	 */
	propagate = !pos->nr_active++;
	pos->children_weight += pos->leaf_weight;
	vfr = vfr * pos->leaf_weight / pos->children_weight;

	/*
	 * Compound ->weight walking up the tree.  Both activation and
	 * vfraction calculation are done in the same loop.  Propagation
	 * stops once an already activated node is met.  vfraction
	 * calculation should always continue to the root.
	 */
	while ((parent = cfqg_parent(pos))) {
		if (propagate) {
			cfq_update_group_weight(pos);
			propagate = !parent->nr_active++;
			parent->children_weight += pos->weight;
		}
		vfr = vfr * pos->weight / parent->children_weight;
		pos = parent;
	}

	cfqg->vfraction = max_t(unsigned, vfr, 1);
}

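/*
 * Worked example (illustrative): consider a cfqg with leaf_weight 500 whose
 * parent ends up with children_weight 1000, while the parent's own weight
 * is 500 out of the root's children_weight of 1000.  Compounding the ratios
 * gives vfr = (1 << 12) * 500/1000 * 500/1000 = 1024, i.e. the cfqg is
 * entitled to roughly a quarter of the device's vdisktime.
 */
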
static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose everything if it was not continuously
	 * backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct cfq_group *pos = cfqg;
	bool propagate;

	/*
	 * Undo activation from cfq_group_service_tree_add().  Deactivate
	 * @cfqg and propagate deactivation upwards.
	 */
	propagate = !--pos->nr_active;
	pos->children_weight -= pos->leaf_weight;

	while (propagate) {
		struct cfq_group *parent = cfqg_parent(pos);

		/* @pos has 0 nr_active at this point */
		WARN_ON_ONCE(pos->children_weight);
		pos->vfraction = 0;

		if (!parent)
			break;

		propagate = !--parent->nr_active;
		parent->children_weight -= pos->weight;
		pos = parent;
	}

	/* remove from the service tree */
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_wl_slice = 0;
	cfqg_stats_update_dequeue(cfqg);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;
	unsigned int vfr;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/*
	 * Can't update vdisktime while on service tree and cfqg->vfraction
	 * is valid only while on it.  Cache vfr, leave the service tree,
	 * update vdisktime and go back on.  The re-addition to the tree
	 * will also update the weights as necessary.
	 */
	vfr = cfqg->vfraction;
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_wl_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_wl_type = cfqd->serving_wl_type;
		cfqg->saved_wl_class = cfqd->serving_wl_class;
	} else
		cfqg->saved_wl_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
			st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
	cfqg_stats_set_start_empty_time(cfqg);
}

/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = jiffies;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void cfqg_stats_init(struct cfqg_stats *stats)
{
	blkg_rwstat_init(&stats->service_bytes);
	blkg_rwstat_init(&stats->serviced);
	blkg_rwstat_init(&stats->merged);
	blkg_rwstat_init(&stats->service_time);
	blkg_rwstat_init(&stats->wait_time);
	blkg_rwstat_init(&stats->queued);

	blkg_stat_init(&stats->sectors);
	blkg_stat_init(&stats->time);

#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_init(&stats->unaccounted_time);
	blkg_stat_init(&stats->avg_queue_size_sum);
	blkg_stat_init(&stats->avg_queue_size_samples);
	blkg_stat_init(&stats->dequeue);
	blkg_stat_init(&stats->group_wait_time);
	blkg_stat_init(&stats->idle_time);
	blkg_stat_init(&stats->empty_time);
#endif
}

static void cfq_cpd_init(const struct blkcg *blkcg)
{
	struct cfq_group_data *cgd =
		cpd_to_cfqgd(blkcg->pd[blkcg_policy_cfq.plid]);

	if (blkcg == &blkcg_root) {
		cgd->weight = 2 * CFQ_WEIGHT_DEFAULT;
		cgd->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
	} else {
		cgd->weight = CFQ_WEIGHT_DEFAULT;
		cgd->leaf_weight = CFQ_WEIGHT_DEFAULT;
	}
}

static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
{
	struct cfq_group *cfqg;

	cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
	if (!cfqg)
		return NULL;

	cfq_init_cfqg_base(cfqg);
	cfqg_stats_init(&cfqg->stats);
	cfqg_stats_init(&cfqg->dead_stats);

	return &cfqg->pd;
}

static void cfq_pd_init(struct blkg_policy_data *pd)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);
	struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);

	cfqg->weight = cgd->weight;
	cfqg->leaf_weight = cgd->leaf_weight;
}

static void cfq_pd_offline(struct blkg_policy_data *pd)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqg->async_cfqq[0][i])
			cfq_put_queue(cfqg->async_cfqq[0][i]);
		if (cfqg->async_cfqq[1][i])
			cfq_put_queue(cfqg->async_cfqq[1][i]);
	}

	if (cfqg->async_idle_cfqq)
		cfq_put_queue(cfqg->async_idle_cfqq);

	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
	 * that they don't get lost.  If IOs complete after this point, the
	 * stats for them will be lost.  Oh well...
	 */
	cfqg_stats_xfer_dead(cfqg);
}

static void cfq_pd_free(struct blkg_policy_data *pd)
{
	return kfree(pd);
}

1637
43114018
TH
1638/* offset delta from cfqg->stats to cfqg->dead_stats */
1639static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
1640 offsetof(struct cfq_group, stats);
1641
1642/* to be used by recursive prfill, sums live and dead stats recursively */
1643static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
1644{
1645 u64 sum = 0;
1646
1647 sum += blkg_stat_recursive_sum(pd, off);
1648 sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
1649 return sum;
1650}
1651
1652/* to be used by recursive prfill, sums live and dead rwstats recursively */
1653static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
1654 int off)
1655{
1656 struct blkg_rwstat a, b;
1657
1658 a = blkg_rwstat_recursive_sum(pd, off);
1659 b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
1660 blkg_rwstat_merge(&a, &b);
1661 return a;
1662}
1663
a9520cd6 1664static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
689665af 1665{
a9520cd6 1666 struct cfq_group *cfqg = pd_to_cfqg(pd);
689665af
TH
1667
1668 cfqg_stats_reset(&cfqg->stats);
0b39920b 1669 cfqg_stats_reset(&cfqg->dead_stats);
25fb5169
VG
1670}
1671
1672/*
3e59cf9d
VG
1673 * Search for the cfq group the current task belongs to. The request_queue lock must
1674 * be held.
25fb5169 1675 */
cd1604fa 1676static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
3c798398 1677 struct blkcg *blkcg)
25fb5169 1678{
f469a7b4 1679 struct request_queue *q = cfqd->queue;
cd1604fa 1680 struct cfq_group *cfqg = NULL;
25fb5169 1681
3c798398
TH
1682 /* avoid lookup for the common case where there's no blkcg */
1683 if (blkcg == &blkcg_root) {
cd1604fa
TH
1684 cfqg = cfqd->root_group;
1685 } else {
3c798398 1686 struct blkcg_gq *blkg;
f469a7b4 1687
3c96cb32 1688 blkg = blkg_lookup_create(blkcg, q);
cd1604fa 1689 if (!IS_ERR(blkg))
0381411e 1690 cfqg = blkg_to_cfqg(blkg);
cd1604fa 1691 }
f469a7b4 1692
25fb5169
VG
1693 return cfqg;
1694}
1695
1696static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1697{
25fb5169 1698 cfqq->cfqg = cfqg;
b1c35769 1699 /* cfqq reference on cfqg */
eb7d8c07 1700 cfqg_get(cfqg);
b1c35769
VG
1701}
1702
f95a04af
TH
1703static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1704 struct blkg_policy_data *pd, int off)
60c2bc2d 1705{
f95a04af 1706 struct cfq_group *cfqg = pd_to_cfqg(pd);
3381cb8d
TH
1707
1708 if (!cfqg->dev_weight)
60c2bc2d 1709 return 0;
f95a04af 1710 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
60c2bc2d
TH
1711}
1712
2da8ca82 1713static int cfqg_print_weight_device(struct seq_file *sf, void *v)
60c2bc2d 1714{
2da8ca82
TH
1715 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1716 cfqg_prfill_weight_device, &blkcg_policy_cfq,
1717 0, false);
60c2bc2d
TH
1718 return 0;
1719}
1720
e71357e1
TH
1721static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1722 struct blkg_policy_data *pd, int off)
1723{
1724 struct cfq_group *cfqg = pd_to_cfqg(pd);
1725
1726 if (!cfqg->dev_leaf_weight)
1727 return 0;
1728 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1729}
1730
2da8ca82 1731static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
e71357e1 1732{
2da8ca82
TH
1733 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1734 cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1735 0, false);
e71357e1
TH
1736 return 0;
1737}
1738
2da8ca82 1739static int cfq_print_weight(struct seq_file *sf, void *v)
60c2bc2d 1740{
e48453c3 1741 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
9470e4a6
JA
1742 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1743 unsigned int val = 0;
e48453c3 1744
9470e4a6
JA
1745 if (cgd)
1746 val = cgd->weight;
1747
1748 seq_printf(sf, "%u\n", val);
60c2bc2d
TH
1749 return 0;
1750}
1751
2da8ca82 1752static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
e71357e1 1753{
e48453c3 1754 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
9470e4a6
JA
1755 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1756 unsigned int val = 0;
1757
1758 if (cgd)
1759 val = cgd->leaf_weight;
e48453c3 1760
9470e4a6 1761 seq_printf(sf, "%u\n", val);
e71357e1
TH
1762 return 0;
1763}
1764
451af504
TH
1765static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1766 char *buf, size_t nbytes, loff_t off,
1767 bool is_leaf_weight)
60c2bc2d 1768{
451af504 1769 struct blkcg *blkcg = css_to_blkcg(of_css(of));
60c2bc2d 1770 struct blkg_conf_ctx ctx;
3381cb8d 1771 struct cfq_group *cfqg;
e48453c3 1772 struct cfq_group_data *cfqgd;
60c2bc2d
TH
1773 int ret;
1774
3c798398 1775 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
60c2bc2d
TH
1776 if (ret)
1777 return ret;
1778
1779 ret = -EINVAL;
3381cb8d 1780 cfqg = blkg_to_cfqg(ctx.blkg);
e48453c3 1781 cfqgd = blkcg_to_cfqgd(blkcg);
ae994ea9
JA
1782 if (!cfqg || !cfqgd)
1783 goto err;
1784
a2b1693b 1785 if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
e71357e1
TH
1786 if (!is_leaf_weight) {
1787 cfqg->dev_weight = ctx.v;
e48453c3 1788 cfqg->new_weight = ctx.v ?: cfqgd->weight;
e71357e1
TH
1789 } else {
1790 cfqg->dev_leaf_weight = ctx.v;
e48453c3 1791 cfqg->new_leaf_weight = ctx.v ?: cfqgd->leaf_weight;
e71357e1 1792 }
60c2bc2d
TH
1793 ret = 0;
1794 }
1795
ae994ea9 1796err:
60c2bc2d 1797 blkg_conf_finish(&ctx);
451af504 1798 return ret ?: nbytes;
60c2bc2d
TH
1799}
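/*
 * Usage sketch for the per-device knob parsed above (the mount point
 * and device number are illustrative assumptions; assumes the legacy
 * blkio controller is mounted at /sys/fs/cgroup/blkio):
 *
 *   echo "8:16 300" > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 *   echo "8:16 0"   > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 *
 * A value of 0 clears the per-device override and falls back to the
 * group-wide weight (ctx.v ?: cfqgd->weight above).
 */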
1800
451af504
TH
1801static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1802 char *buf, size_t nbytes, loff_t off)
e71357e1 1803{
451af504 1804 return __cfqg_set_weight_device(of, buf, nbytes, off, false);
e71357e1
TH
1805}
1806
451af504
TH
1807static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1808 char *buf, size_t nbytes, loff_t off)
e71357e1 1809{
451af504 1810 return __cfqg_set_weight_device(of, buf, nbytes, off, true);
e71357e1
TH
1811}
1812
182446d0
TH
1813static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1814 u64 val, bool is_leaf_weight)
60c2bc2d 1815{
182446d0 1816 struct blkcg *blkcg = css_to_blkcg(css);
3c798398 1817 struct blkcg_gq *blkg;
e48453c3 1818 struct cfq_group_data *cfqgd;
ae994ea9 1819 int ret = 0;
60c2bc2d 1820
3381cb8d 1821 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
60c2bc2d
TH
1822 return -EINVAL;
1823
1824 spin_lock_irq(&blkcg->lock);
e48453c3 1825 cfqgd = blkcg_to_cfqgd(blkcg);
ae994ea9
JA
1826 if (!cfqgd) {
1827 ret = -EINVAL;
1828 goto out;
1829 }
e71357e1
TH
1830
1831 if (!is_leaf_weight)
e48453c3 1832 cfqgd->weight = val;
e71357e1 1833 else
e48453c3 1834 cfqgd->leaf_weight = val;
60c2bc2d 1835
b67bfe0d 1836 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3381cb8d 1837 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
60c2bc2d 1838
e71357e1
TH
1839 if (!cfqg)
1840 continue;
1841
1842 if (!is_leaf_weight) {
1843 if (!cfqg->dev_weight)
e48453c3 1844 cfqg->new_weight = cfqgd->weight;
e71357e1
TH
1845 } else {
1846 if (!cfqg->dev_leaf_weight)
e48453c3 1847 cfqg->new_leaf_weight = cfqgd->leaf_weight;
e71357e1 1848 }
60c2bc2d
TH
1849 }
1850
ae994ea9 1851out:
60c2bc2d 1852 spin_unlock_irq(&blkcg->lock);
ae994ea9 1853 return ret;
60c2bc2d
TH
1854}
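/*
 * Usage sketch for the group-wide default set above (same illustrative
 * mount-point assumption as the previous example):
 *
 *   echo 750 > /sys/fs/cgroup/blkio/grp/blkio.weight
 *
 * Note how the loop above only refreshes cfqgs whose dev_weight is 0,
 * i.e. existing per-device overrides keep winning over the new default.
 */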
1855
182446d0
TH
1856static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1857 u64 val)
e71357e1 1858{
182446d0 1859 return __cfq_set_weight(css, cft, val, false);
e71357e1
TH
1860}
1861
182446d0
TH
1862static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1863 struct cftype *cft, u64 val)
e71357e1 1864{
182446d0 1865 return __cfq_set_weight(css, cft, val, true);
e71357e1
TH
1866}
1867
2da8ca82 1868static int cfqg_print_stat(struct seq_file *sf, void *v)
5bc4afb1 1869{
2da8ca82
TH
1870 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1871 &blkcg_policy_cfq, seq_cft(sf)->private, false);
5bc4afb1
TH
1872 return 0;
1873}
1874
2da8ca82 1875static int cfqg_print_rwstat(struct seq_file *sf, void *v)
5bc4afb1 1876{
2da8ca82
TH
1877 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1878 &blkcg_policy_cfq, seq_cft(sf)->private, true);
5bc4afb1
TH
1879 return 0;
1880}
1881
43114018
TH
1882static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1883 struct blkg_policy_data *pd, int off)
1884{
1885 u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
1886
1887 return __blkg_prfill_u64(sf, pd, sum);
1888}
1889
1890static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1891 struct blkg_policy_data *pd, int off)
1892{
1893 struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
1894
1895 return __blkg_prfill_rwstat(sf, pd, &sum);
1896}
1897
2da8ca82 1898static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
43114018 1899{
2da8ca82
TH
1900 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1901 cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1902 seq_cft(sf)->private, false);
43114018
TH
1903 return 0;
1904}
1905
2da8ca82 1906static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
43114018 1907{
2da8ca82
TH
1908 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1909 cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1910 seq_cft(sf)->private, true);
43114018
TH
1911 return 0;
1912}
1913
60c2bc2d 1914#ifdef CONFIG_DEBUG_BLK_CGROUP
f95a04af
TH
1915static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1916 struct blkg_policy_data *pd, int off)
60c2bc2d 1917{
f95a04af 1918 struct cfq_group *cfqg = pd_to_cfqg(pd);
155fead9 1919 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
60c2bc2d
TH
1920 u64 v = 0;
1921
1922 if (samples) {
155fead9 1923 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
f3cff25f 1924 v = div64_u64(v, samples);
60c2bc2d 1925 }
f95a04af 1926 __blkg_prfill_u64(sf, pd, v);
60c2bc2d
TH
1927 return 0;
1928}
1929
1930/* print avg_queue_size */
2da8ca82 1931static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
60c2bc2d 1932{
2da8ca82
TH
1933 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1934 cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
1935 0, false);
60c2bc2d
TH
1936 return 0;
1937}
1938#endif /* CONFIG_DEBUG_BLK_CGROUP */
1939
1940static struct cftype cfq_blkcg_files[] = {
1d3650f7 1941 /* on root, weight is mapped to leaf_weight */
60c2bc2d
TH
1942 {
1943 .name = "weight_device",
1d3650f7 1944 .flags = CFTYPE_ONLY_ON_ROOT,
2da8ca82 1945 .seq_show = cfqg_print_leaf_weight_device,
451af504 1946 .write = cfqg_set_leaf_weight_device,
60c2bc2d
TH
1947 },
1948 {
1949 .name = "weight",
1d3650f7 1950 .flags = CFTYPE_ONLY_ON_ROOT,
2da8ca82 1951 .seq_show = cfq_print_leaf_weight,
1d3650f7 1952 .write_u64 = cfq_set_leaf_weight,
60c2bc2d 1953 },
e71357e1 1954
1d3650f7 1955 /* no such mapping necessary for !roots */
60c2bc2d
TH
1956 {
1957 .name = "weight_device",
1d3650f7 1958 .flags = CFTYPE_NOT_ON_ROOT,
2da8ca82 1959 .seq_show = cfqg_print_weight_device,
451af504 1960 .write = cfqg_set_weight_device,
60c2bc2d
TH
1961 },
1962 {
1963 .name = "weight",
1d3650f7 1964 .flags = CFTYPE_NOT_ON_ROOT,
2da8ca82 1965 .seq_show = cfq_print_weight,
3381cb8d 1966 .write_u64 = cfq_set_weight,
60c2bc2d 1967 },
e71357e1 1968
e71357e1
TH
1969 {
1970 .name = "leaf_weight_device",
2da8ca82 1971 .seq_show = cfqg_print_leaf_weight_device,
451af504 1972 .write = cfqg_set_leaf_weight_device,
e71357e1
TH
1973 },
1974 {
1975 .name = "leaf_weight",
2da8ca82 1976 .seq_show = cfq_print_leaf_weight,
e71357e1
TH
1977 .write_u64 = cfq_set_leaf_weight,
1978 },
1979
43114018 1980 /* statistics, covers only the tasks in the cfqg */
60c2bc2d
TH
1981 {
1982 .name = "time",
5bc4afb1 1983 .private = offsetof(struct cfq_group, stats.time),
2da8ca82 1984 .seq_show = cfqg_print_stat,
60c2bc2d
TH
1985 },
1986 {
1987 .name = "sectors",
5bc4afb1 1988 .private = offsetof(struct cfq_group, stats.sectors),
2da8ca82 1989 .seq_show = cfqg_print_stat,
60c2bc2d
TH
1990 },
1991 {
1992 .name = "io_service_bytes",
5bc4afb1 1993 .private = offsetof(struct cfq_group, stats.service_bytes),
2da8ca82 1994 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
1995 },
1996 {
1997 .name = "io_serviced",
5bc4afb1 1998 .private = offsetof(struct cfq_group, stats.serviced),
2da8ca82 1999 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2000 },
2001 {
2002 .name = "io_service_time",
5bc4afb1 2003 .private = offsetof(struct cfq_group, stats.service_time),
2da8ca82 2004 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2005 },
2006 {
2007 .name = "io_wait_time",
5bc4afb1 2008 .private = offsetof(struct cfq_group, stats.wait_time),
2da8ca82 2009 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2010 },
2011 {
2012 .name = "io_merged",
5bc4afb1 2013 .private = offsetof(struct cfq_group, stats.merged),
2da8ca82 2014 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2015 },
2016 {
2017 .name = "io_queued",
5bc4afb1 2018 .private = offsetof(struct cfq_group, stats.queued),
2da8ca82 2019 .seq_show = cfqg_print_rwstat,
60c2bc2d 2020 },
43114018
TH
2021
2022 /* the same statistics, which cover the cfqg and its descendants */
2023 {
2024 .name = "time_recursive",
2025 .private = offsetof(struct cfq_group, stats.time),
2da8ca82 2026 .seq_show = cfqg_print_stat_recursive,
43114018
TH
2027 },
2028 {
2029 .name = "sectors_recursive",
2030 .private = offsetof(struct cfq_group, stats.sectors),
2da8ca82 2031 .seq_show = cfqg_print_stat_recursive,
43114018
TH
2032 },
2033 {
2034 .name = "io_service_bytes_recursive",
2035 .private = offsetof(struct cfq_group, stats.service_bytes),
2da8ca82 2036 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2037 },
2038 {
2039 .name = "io_serviced_recursive",
2040 .private = offsetof(struct cfq_group, stats.serviced),
2da8ca82 2041 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2042 },
2043 {
2044 .name = "io_service_time_recursive",
2045 .private = offsetof(struct cfq_group, stats.service_time),
2da8ca82 2046 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2047 },
2048 {
2049 .name = "io_wait_time_recursive",
2050 .private = offsetof(struct cfq_group, stats.wait_time),
2da8ca82 2051 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2052 },
2053 {
2054 .name = "io_merged_recursive",
2055 .private = offsetof(struct cfq_group, stats.merged),
2da8ca82 2056 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2057 },
2058 {
2059 .name = "io_queued_recursive",
2060 .private = offsetof(struct cfq_group, stats.queued),
2da8ca82 2061 .seq_show = cfqg_print_rwstat_recursive,
43114018 2062 },
60c2bc2d
TH
2063#ifdef CONFIG_DEBUG_BLK_CGROUP
2064 {
2065 .name = "avg_queue_size",
2da8ca82 2066 .seq_show = cfqg_print_avg_queue_size,
60c2bc2d
TH
2067 },
2068 {
2069 .name = "group_wait_time",
5bc4afb1 2070 .private = offsetof(struct cfq_group, stats.group_wait_time),
2da8ca82 2071 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2072 },
2073 {
2074 .name = "idle_time",
5bc4afb1 2075 .private = offsetof(struct cfq_group, stats.idle_time),
2da8ca82 2076 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2077 },
2078 {
2079 .name = "empty_time",
5bc4afb1 2080 .private = offsetof(struct cfq_group, stats.empty_time),
2da8ca82 2081 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2082 },
2083 {
2084 .name = "dequeue",
5bc4afb1 2085 .private = offsetof(struct cfq_group, stats.dequeue),
2da8ca82 2086 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2087 },
2088 {
2089 .name = "unaccounted_time",
5bc4afb1 2090 .private = offsetof(struct cfq_group, stats.unaccounted_time),
2da8ca82 2091 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2092 },
2093#endif /* CONFIG_DEBUG_BLK_CGROUP */
2094 { } /* terminate */
2095};
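/*
 * The cftype table above is hooked up to blkcg_policy_cfq elsewhere in
 * this file; under the legacy blkio controller these entries
 * conventionally show up with a "blkio." prefix, e.g. blkio.weight,
 * blkio.leaf_weight, blkio.time_recursive (the prefixing is done by
 * the cgroup core, not by this table).
 */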
25fb5169 2096#else /* GROUP_IOSCHED */
cd1604fa 2097static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
3c798398 2098 struct blkcg *blkcg)
25fb5169 2099{
f51b802c 2100 return cfqd->root_group;
25fb5169 2101}
7f1dc8a2 2102
25fb5169
VG
2103static inline void
2104cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2105 cfqq->cfqg = cfqg;
2106}
2107
2108#endif /* GROUP_IOSCHED */
2109
498d3aa2 2110/*
c0324a02 2111 * The cfqd->service_trees hold all pending cfq_queues that have
498d3aa2
JA
2112 * requests waiting to be processed. They are sorted in the order that
2113 * we will service the queues.
2114 */
a36e71f9 2115static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 2116 bool add_front)
d9e7620e 2117{
0871714e
JA
2118 struct rb_node **p, *parent;
2119 struct cfq_queue *__cfqq;
d9e7620e 2120 unsigned long rb_key;
34b98d03 2121 struct cfq_rb_root *st;
498d3aa2 2122 int left;
dae739eb 2123 int new_cfqq = 1;
ae30c286 2124
34b98d03 2125 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
0871714e
JA
2126 if (cfq_class_idle(cfqq)) {
2127 rb_key = CFQ_IDLE_DELAY;
34b98d03 2128 parent = rb_last(&st->rb);
0871714e
JA
2129 if (parent && parent != &cfqq->rb_node) {
2130 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2131 rb_key += __cfqq->rb_key;
2132 } else
2133 rb_key += jiffies;
2134 } else if (!add_front) {
b9c8946b
JA
2135 /*
2136 * Get our rb key offset. Subtract any residual slice
2137 * value carried from last service. A negative resid
2138 * count indicates slice overrun, and this should position
2139 * the next service time further away in the tree.
2140 */
edd75ffd 2141 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
b9c8946b 2142 rb_key -= cfqq->slice_resid;
edd75ffd 2143 cfqq->slice_resid = 0;
48e025e6
CZ
2144 } else {
2145 rb_key = -HZ;
34b98d03 2146 __cfqq = cfq_rb_first(st);
48e025e6
CZ
2147 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2148 }
1da177e4 2149
d9e7620e 2150 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
dae739eb 2151 new_cfqq = 0;
99f9628a 2152 /*
d9e7620e 2153 * same position, nothing more to do
99f9628a 2154 */
34b98d03 2155 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
d9e7620e 2156 return;
1da177e4 2157
aa6f6a3d
CZ
2158 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2159 cfqq->service_tree = NULL;
1da177e4 2160 }
d9e7620e 2161
498d3aa2 2162 left = 1;
0871714e 2163 parent = NULL;
34b98d03
VG
2164 cfqq->service_tree = st;
2165 p = &st->rb.rb_node;
d9e7620e
JA
2166 while (*p) {
2167 parent = *p;
2168 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2169
0c534e0a 2170 /*
c0324a02 2171 * sort by key, which represents service time.
0c534e0a 2172 */
c0324a02 2173 if (time_before(rb_key, __cfqq->rb_key))
1f23f121 2174 p = &parent->rb_left;
c0324a02 2175 else {
1f23f121 2176 p = &parent->rb_right;
cc09e299 2177 left = 0;
c0324a02 2178 }
d9e7620e
JA
2179 }
2180
cc09e299 2181 if (left)
34b98d03 2182 st->left = &cfqq->rb_node;
cc09e299 2183
d9e7620e
JA
2184 cfqq->rb_key = rb_key;
2185 rb_link_node(&cfqq->rb_node, parent, p);
34b98d03
VG
2186 rb_insert_color(&cfqq->rb_node, &st->rb);
2187 st->count++;
20359f27 2188 if (add_front || !new_cfqq)
dae739eb 2189 return;
8184f93e 2190 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1da177e4
LT
2191}
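/*
 * Worked example for the rb_key computed above (illustrative numbers):
 * with jiffies == 10000, a slice offset of 20 and slice_resid == -5
 * (the queue overran its previous slice by 5 jiffies), rb_key becomes
 * 10000 + 20 - (-5) = 10025, i.e. the overrun pushes the queue further
 * to the right (serviced later) than a queue that stayed within its
 * slice.
 */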
2192
a36e71f9 2193static struct cfq_queue *
f2d1f0ae
JA
2194cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2195 sector_t sector, struct rb_node **ret_parent,
2196 struct rb_node ***rb_link)
a36e71f9 2197{
a36e71f9
JA
2198 struct rb_node **p, *parent;
2199 struct cfq_queue *cfqq = NULL;
2200
2201 parent = NULL;
2202 p = &root->rb_node;
2203 while (*p) {
2204 struct rb_node **n;
2205
2206 parent = *p;
2207 cfqq = rb_entry(parent, struct cfq_queue, p_node);
2208
2209 /*
2210 * Sort strictly based on sector. Smallest to the left,
2211 * largest to the right.
2212 */
2e46e8b2 2213 if (sector > blk_rq_pos(cfqq->next_rq))
a36e71f9 2214 n = &(*p)->rb_right;
2e46e8b2 2215 else if (sector < blk_rq_pos(cfqq->next_rq))
a36e71f9
JA
2216 n = &(*p)->rb_left;
2217 else
2218 break;
2219 p = n;
3ac6c9f8 2220 cfqq = NULL;
a36e71f9
JA
2221 }
2222
2223 *ret_parent = parent;
2224 if (rb_link)
2225 *rb_link = p;
3ac6c9f8 2226 return cfqq;
a36e71f9
JA
2227}
2228
2229static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2230{
a36e71f9
JA
2231 struct rb_node **p, *parent;
2232 struct cfq_queue *__cfqq;
2233
f2d1f0ae
JA
2234 if (cfqq->p_root) {
2235 rb_erase(&cfqq->p_node, cfqq->p_root);
2236 cfqq->p_root = NULL;
2237 }
a36e71f9
JA
2238
2239 if (cfq_class_idle(cfqq))
2240 return;
2241 if (!cfqq->next_rq)
2242 return;
2243
f2d1f0ae 2244 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2e46e8b2
TH
2245 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2246 blk_rq_pos(cfqq->next_rq), &parent, &p);
3ac6c9f8
JA
2247 if (!__cfqq) {
2248 rb_link_node(&cfqq->p_node, parent, p);
f2d1f0ae
JA
2249 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2250 } else
2251 cfqq->p_root = NULL;
a36e71f9
JA
2252}
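/*
 * Note: the prio tree maintained here is keyed on the start sector of
 * cfqq->next_rq, one tree per org_ioprio. cfqq_close() further down
 * walks it with cfqd->last_position to find a queue whose next request
 * is close to the last dispatched position.
 */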
2253
498d3aa2
JA
2254/*
2255 * Update cfqq's position in the service tree.
2256 */
edd75ffd 2257static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
6d048f53 2258{
6d048f53
JA
2259 /*
2260 * Resorting requires the cfqq to be on the RR list already.
2261 */
a36e71f9 2262 if (cfq_cfqq_on_rr(cfqq)) {
edd75ffd 2263 cfq_service_tree_add(cfqd, cfqq, 0);
a36e71f9
JA
2264 cfq_prio_tree_add(cfqd, cfqq);
2265 }
6d048f53
JA
2266}
2267
1da177e4
LT
2268/*
2269 * add to busy list of queues for service, trying to be fair in ordering
22e2c507 2270 * the pending list according to last request service
1da177e4 2271 */
febffd61 2272static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 2273{
7b679138 2274 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
3b18152c
JA
2275 BUG_ON(cfq_cfqq_on_rr(cfqq));
2276 cfq_mark_cfqq_on_rr(cfqq);
1da177e4 2277 cfqd->busy_queues++;
ef8a41df
SL
2278 if (cfq_cfqq_sync(cfqq))
2279 cfqd->busy_sync_queues++;
1da177e4 2280
edd75ffd 2281 cfq_resort_rr_list(cfqd, cfqq);
1da177e4
LT
2282}
2283
498d3aa2
JA
2284/*
2285 * Called when the cfqq no longer has requests pending, remove it from
2286 * the service tree.
2287 */
febffd61 2288static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 2289{
7b679138 2290 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
3b18152c
JA
2291 BUG_ON(!cfq_cfqq_on_rr(cfqq));
2292 cfq_clear_cfqq_on_rr(cfqq);
1da177e4 2293
aa6f6a3d
CZ
2294 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2295 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2296 cfqq->service_tree = NULL;
2297 }
f2d1f0ae
JA
2298 if (cfqq->p_root) {
2299 rb_erase(&cfqq->p_node, cfqq->p_root);
2300 cfqq->p_root = NULL;
2301 }
d9e7620e 2302
8184f93e 2303 cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1da177e4
LT
2304 BUG_ON(!cfqd->busy_queues);
2305 cfqd->busy_queues--;
ef8a41df
SL
2306 if (cfq_cfqq_sync(cfqq))
2307 cfqd->busy_sync_queues--;
1da177e4
LT
2308}
2309
2310/*
2311 * rb tree support functions
2312 */
febffd61 2313static void cfq_del_rq_rb(struct request *rq)
1da177e4 2314{
5e705374 2315 struct cfq_queue *cfqq = RQ_CFQQ(rq);
5e705374 2316 const int sync = rq_is_sync(rq);
1da177e4 2317
b4878f24
JA
2318 BUG_ON(!cfqq->queued[sync]);
2319 cfqq->queued[sync]--;
1da177e4 2320
5e705374 2321 elv_rb_del(&cfqq->sort_list, rq);
1da177e4 2322
f04a6424
VG
2323 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2324 /*
2325 * Queue will be deleted from service tree when we actually
2326 * expire it later. Right now just remove it from prio tree
2327 * as it is empty.
2328 */
2329 if (cfqq->p_root) {
2330 rb_erase(&cfqq->p_node, cfqq->p_root);
2331 cfqq->p_root = NULL;
2332 }
2333 }
1da177e4
LT
2334}
2335
5e705374 2336static void cfq_add_rq_rb(struct request *rq)
1da177e4 2337{
5e705374 2338 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 2339 struct cfq_data *cfqd = cfqq->cfqd;
796d5116 2340 struct request *prev;
1da177e4 2341
5380a101 2342 cfqq->queued[rq_is_sync(rq)]++;
1da177e4 2343
796d5116 2344 elv_rb_add(&cfqq->sort_list, rq);
5fccbf61
JA
2345
2346 if (!cfq_cfqq_on_rr(cfqq))
2347 cfq_add_cfqq_rr(cfqd, cfqq);
5044eed4
JA
2348
2349 /*
2350 * check if this request is a better next-serve candidate
2351 */
a36e71f9 2352 prev = cfqq->next_rq;
cf7c25cf 2353 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
a36e71f9
JA
2354
2355 /*
2356 * adjust priority tree position, if ->next_rq changes
2357 */
2358 if (prev != cfqq->next_rq)
2359 cfq_prio_tree_add(cfqd, cfqq);
2360
5044eed4 2361 BUG_ON(!cfqq->next_rq);
1da177e4
LT
2362}
2363
febffd61 2364static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1da177e4 2365{
5380a101
JA
2366 elv_rb_del(&cfqq->sort_list, rq);
2367 cfqq->queued[rq_is_sync(rq)]--;
155fead9 2368 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
5e705374 2369 cfq_add_rq_rb(rq);
155fead9
TH
2370 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2371 rq->cmd_flags);
1da177e4
LT
2372}
2373
206dc69b
JA
2374static struct request *
2375cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1da177e4 2376{
206dc69b 2377 struct task_struct *tsk = current;
c5869807 2378 struct cfq_io_cq *cic;
206dc69b 2379 struct cfq_queue *cfqq;
1da177e4 2380
4ac845a2 2381 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
2382 if (!cic)
2383 return NULL;
2384
2385 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
f73a1c7d
KO
2386 if (cfqq)
2387 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
1da177e4 2388
1da177e4
LT
2389 return NULL;
2390}
2391
165125e1 2392static void cfq_activate_request(struct request_queue *q, struct request *rq)
1da177e4 2393{
22e2c507 2394 struct cfq_data *cfqd = q->elevator->elevator_data;
3b18152c 2395
53c583d2 2396 cfqd->rq_in_driver++;
7b679138 2397 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
53c583d2 2398 cfqd->rq_in_driver);
25776e35 2399
5b93629b 2400 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1da177e4
LT
2401}
2402
165125e1 2403static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1da177e4 2404{
b4878f24
JA
2405 struct cfq_data *cfqd = q->elevator->elevator_data;
2406
53c583d2
CZ
2407 WARN_ON(!cfqd->rq_in_driver);
2408 cfqd->rq_in_driver--;
7b679138 2409 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
53c583d2 2410 cfqd->rq_in_driver);
1da177e4
LT
2411}
2412
b4878f24 2413static void cfq_remove_request(struct request *rq)
1da177e4 2414{
5e705374 2415 struct cfq_queue *cfqq = RQ_CFQQ(rq);
21183b07 2416
5e705374
JA
2417 if (cfqq->next_rq == rq)
2418 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1da177e4 2419
b4878f24 2420 list_del_init(&rq->queuelist);
5e705374 2421 cfq_del_rq_rb(rq);
374f84ac 2422
45333d5a 2423 cfqq->cfqd->rq_queued--;
155fead9 2424 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
65299a3b
CH
2425 if (rq->cmd_flags & REQ_PRIO) {
2426 WARN_ON(!cfqq->prio_pending);
2427 cfqq->prio_pending--;
b53d1ed7 2428 }
1da177e4
LT
2429}
2430
165125e1
JA
2431static int cfq_merge(struct request_queue *q, struct request **req,
2432 struct bio *bio)
1da177e4
LT
2433{
2434 struct cfq_data *cfqd = q->elevator->elevator_data;
2435 struct request *__rq;
1da177e4 2436
206dc69b 2437 __rq = cfq_find_rq_fmerge(cfqd, bio);
22e2c507 2438 if (__rq && elv_rq_merge_ok(__rq, bio)) {
9817064b
JA
2439 *req = __rq;
2440 return ELEVATOR_FRONT_MERGE;
1da177e4
LT
2441 }
2442
2443 return ELEVATOR_NO_MERGE;
1da177e4
LT
2444}
2445
165125e1 2446static void cfq_merged_request(struct request_queue *q, struct request *req,
21183b07 2447 int type)
1da177e4 2448{
21183b07 2449 if (type == ELEVATOR_FRONT_MERGE) {
5e705374 2450 struct cfq_queue *cfqq = RQ_CFQQ(req);
1da177e4 2451
5e705374 2452 cfq_reposition_rq_rb(cfqq, req);
1da177e4 2453 }
1da177e4
LT
2454}
2455
812d4026
DS
2456static void cfq_bio_merged(struct request_queue *q, struct request *req,
2457 struct bio *bio)
2458{
155fead9 2459 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
812d4026
DS
2460}
2461
1da177e4 2462static void
165125e1 2463cfq_merged_requests(struct request_queue *q, struct request *rq,
1da177e4
LT
2464 struct request *next)
2465{
cf7c25cf 2466 struct cfq_queue *cfqq = RQ_CFQQ(rq);
4a0b75c7
SL
2467 struct cfq_data *cfqd = q->elevator->elevator_data;
2468
22e2c507
JA
2469 /*
2470 * reposition in fifo if next is older than rq
2471 */
2472 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
8b4922d3 2473 time_before(next->fifo_time, rq->fifo_time) &&
3d106fba 2474 cfqq == RQ_CFQQ(next)) {
22e2c507 2475 list_move(&rq->queuelist, &next->queuelist);
8b4922d3 2476 rq->fifo_time = next->fifo_time;
30996f40 2477 }
22e2c507 2478
cf7c25cf
CZ
2479 if (cfqq->next_rq == next)
2480 cfqq->next_rq = rq;
b4878f24 2481 cfq_remove_request(next);
155fead9 2482 cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
4a0b75c7
SL
2483
2484 cfqq = RQ_CFQQ(next);
2485 /*
2486 * all requests of this queue are merged to other queues, delete it
2487 * from the service tree. If it's the active_queue,
2488 * cfq_dispatch_requests() will choose to expire it or do idle
2489 */
2490 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2491 cfqq != cfqd->active_queue)
2492 cfq_del_cfqq_rr(cfqd, cfqq);
22e2c507
JA
2493}
2494
165125e1 2495static int cfq_allow_merge(struct request_queue *q, struct request *rq,
da775265
JA
2496 struct bio *bio)
2497{
2498 struct cfq_data *cfqd = q->elevator->elevator_data;
c5869807 2499 struct cfq_io_cq *cic;
da775265 2500 struct cfq_queue *cfqq;
da775265
JA
2501
2502 /*
ec8acb69 2503 * Disallow merge of a sync bio into an async request.
da775265 2504 */
91fac317 2505 if (cfq_bio_sync(bio) && !rq_is_sync(rq))
a6151c3a 2506 return false;
da775265
JA
2507
2508 /*
f1a4f4d3 2509 * Lookup the cfqq that this bio will be queued with and allow
07c2bd37 2510 * merge only if rq is queued there.
f1a4f4d3 2511 */
07c2bd37
TH
2512 cic = cfq_cic_lookup(cfqd, current->io_context);
2513 if (!cic)
2514 return false;
719d3402 2515
91fac317 2516 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
a6151c3a 2517 return cfqq == RQ_CFQQ(rq);
da775265
JA
2518}
2519
812df48d
DS
2520static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2521{
2522 del_timer(&cfqd->idle_slice_timer);
155fead9 2523 cfqg_stats_update_idle_time(cfqq->cfqg);
812df48d
DS
2524}
2525
febffd61
JA
2526static void __cfq_set_active_queue(struct cfq_data *cfqd,
2527 struct cfq_queue *cfqq)
22e2c507
JA
2528{
2529 if (cfqq) {
3bf10fea 2530 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
4d2ceea4 2531 cfqd->serving_wl_class, cfqd->serving_wl_type);
155fead9 2532 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
62a37f6b
JT
2533 cfqq->slice_start = 0;
2534 cfqq->dispatch_start = jiffies;
2535 cfqq->allocated_slice = 0;
2536 cfqq->slice_end = 0;
2537 cfqq->slice_dispatch = 0;
2538 cfqq->nr_sectors = 0;
2539
2540 cfq_clear_cfqq_wait_request(cfqq);
2541 cfq_clear_cfqq_must_dispatch(cfqq);
2542 cfq_clear_cfqq_must_alloc_slice(cfqq);
2543 cfq_clear_cfqq_fifo_expire(cfqq);
2544 cfq_mark_cfqq_slice_new(cfqq);
2545
2546 cfq_del_timer(cfqd, cfqq);
22e2c507
JA
2547 }
2548
2549 cfqd->active_queue = cfqq;
2550}
2551
7b14e3b5
JA
2552/*
2553 * current cfqq expired its slice (or was too idle), select new one
2554 */
2555static void
2556__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e5ff082e 2557 bool timed_out)
7b14e3b5 2558{
7b679138
JA
2559 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2560
7b14e3b5 2561 if (cfq_cfqq_wait_request(cfqq))
812df48d 2562 cfq_del_timer(cfqd, cfqq);
7b14e3b5 2563
7b14e3b5 2564 cfq_clear_cfqq_wait_request(cfqq);
f75edf2d 2565 cfq_clear_cfqq_wait_busy(cfqq);
7b14e3b5 2566
ae54abed
SL
2567 /*
2568 * If this cfqq is shared between multiple processes, check to
2569 * make sure that those processes are still issuing I/Os within
2570 * the mean seek distance. If not, it may be time to break the
2571 * queues apart again.
2572 */
2573 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2574 cfq_mark_cfqq_split_coop(cfqq);
2575
7b14e3b5 2576 /*
6084cdda 2577 * store what was left of this slice, if the queue idled/timed out
7b14e3b5 2578 */
c553f8e3
SL
2579 if (timed_out) {
2580 if (cfq_cfqq_slice_new(cfqq))
ba5bd520 2581 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
c553f8e3
SL
2582 else
2583 cfqq->slice_resid = cfqq->slice_end - jiffies;
7b679138
JA
2584 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2585 }
7b14e3b5 2586
e5ff082e 2587 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
dae739eb 2588
f04a6424
VG
2589 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2590 cfq_del_cfqq_rr(cfqd, cfqq);
2591
edd75ffd 2592 cfq_resort_rr_list(cfqd, cfqq);
7b14e3b5
JA
2593
2594 if (cfqq == cfqd->active_queue)
2595 cfqd->active_queue = NULL;
2596
2597 if (cfqd->active_cic) {
11a3122f 2598 put_io_context(cfqd->active_cic->icq.ioc);
7b14e3b5
JA
2599 cfqd->active_cic = NULL;
2600 }
7b14e3b5
JA
2601}
2602
e5ff082e 2603static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
7b14e3b5
JA
2604{
2605 struct cfq_queue *cfqq = cfqd->active_queue;
2606
2607 if (cfqq)
e5ff082e 2608 __cfq_slice_expired(cfqd, cfqq, timed_out);
7b14e3b5
JA
2609}
2610
498d3aa2
JA
2611/*
2612 * Get next queue for service. Unless we have a queue preemption,
2613 * we'll simply select the first cfqq in the service tree.
2614 */
6d048f53 2615static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
22e2c507 2616{
34b98d03
VG
2617 struct cfq_rb_root *st = st_for(cfqd->serving_group,
2618 cfqd->serving_wl_class, cfqd->serving_wl_type);
d9e7620e 2619
f04a6424
VG
2620 if (!cfqd->rq_queued)
2621 return NULL;
2622
1fa8f6d6 2623 /* There is nothing to dispatch */
34b98d03 2624 if (!st)
1fa8f6d6 2625 return NULL;
34b98d03 2626 if (RB_EMPTY_ROOT(&st->rb))
c0324a02 2627 return NULL;
34b98d03 2628 return cfq_rb_first(st);
6d048f53
JA
2629}
2630
f04a6424
VG
2631static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2632{
25fb5169 2633 struct cfq_group *cfqg;
f04a6424
VG
2634 struct cfq_queue *cfqq;
2635 int i, j;
2636 struct cfq_rb_root *st;
2637
2638 if (!cfqd->rq_queued)
2639 return NULL;
2640
25fb5169
VG
2641 cfqg = cfq_get_next_cfqg(cfqd);
2642 if (!cfqg)
2643 return NULL;
2644
f04a6424
VG
2645 for_each_cfqg_st(cfqg, i, j, st)
2646 if ((cfqq = cfq_rb_first(st)) != NULL)
2647 return cfqq;
2648 return NULL;
2649}
2650
498d3aa2
JA
2651/*
2652 * Get and set a new active queue for service.
2653 */
a36e71f9
JA
2654static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2655 struct cfq_queue *cfqq)
6d048f53 2656{
e00ef799 2657 if (!cfqq)
a36e71f9 2658 cfqq = cfq_get_next_queue(cfqd);
6d048f53 2659
22e2c507 2660 __cfq_set_active_queue(cfqd, cfqq);
3b18152c 2661 return cfqq;
22e2c507
JA
2662}
2663
d9e7620e
JA
2664static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2665 struct request *rq)
2666{
83096ebf
TH
2667 if (blk_rq_pos(rq) >= cfqd->last_position)
2668 return blk_rq_pos(rq) - cfqd->last_position;
d9e7620e 2669 else
83096ebf 2670 return cfqd->last_position - blk_rq_pos(rq);
d9e7620e
JA
2671}
2672
b2c18e1e 2673static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e9ce335d 2674 struct request *rq)
6d048f53 2675{
e9ce335d 2676 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
6d048f53
JA
2677}
2678
a36e71f9
JA
2679static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2680 struct cfq_queue *cur_cfqq)
2681{
f2d1f0ae 2682 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
a36e71f9
JA
2683 struct rb_node *parent, *node;
2684 struct cfq_queue *__cfqq;
2685 sector_t sector = cfqd->last_position;
2686
2687 if (RB_EMPTY_ROOT(root))
2688 return NULL;
2689
2690 /*
2691 * First, if we find a request starting at the end of the last
2692 * request, choose it.
2693 */
f2d1f0ae 2694 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
a36e71f9
JA
2695 if (__cfqq)
2696 return __cfqq;
2697
2698 /*
2699 * If the exact sector wasn't found, the parent of the NULL leaf
2700 * will contain the closest sector.
2701 */
2702 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
e9ce335d 2703 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
2704 return __cfqq;
2705
2e46e8b2 2706 if (blk_rq_pos(__cfqq->next_rq) < sector)
a36e71f9
JA
2707 node = rb_next(&__cfqq->p_node);
2708 else
2709 node = rb_prev(&__cfqq->p_node);
2710 if (!node)
2711 return NULL;
2712
2713 __cfqq = rb_entry(node, struct cfq_queue, p_node);
e9ce335d 2714 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
2715 return __cfqq;
2716
2717 return NULL;
2718}
2719
2720/*
2721 * cfqd - obvious
2722 * cur_cfqq - passed in so that we don't decide that the current queue is
2723 * closely cooperating with itself.
2724 *
2725 * So, basically we're assuming that cur_cfqq has dispatched at least
2726 * one request, and that cfqd->last_position reflects a position on the disk
2727 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
2728 * assumption.
2729 */
2730static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
b3b6d040 2731 struct cfq_queue *cur_cfqq)
6d048f53 2732{
a36e71f9
JA
2733 struct cfq_queue *cfqq;
2734
39c01b21
DS
2735 if (cfq_class_idle(cur_cfqq))
2736 return NULL;
e6c5bc73
JM
2737 if (!cfq_cfqq_sync(cur_cfqq))
2738 return NULL;
2739 if (CFQQ_SEEKY(cur_cfqq))
2740 return NULL;
2741
b9d8f4c7
GJ
2742 /*
2743 * Don't search priority tree if it's the only queue in the group.
2744 */
2745 if (cur_cfqq->cfqg->nr_cfqq == 1)
2746 return NULL;
2747
6d048f53 2748 /*
d9e7620e
JA
2749 * We should notice if some of the queues are cooperating, e.g.
2750 * working closely on the same area of the disk. In that case,
2751 * we can group them together and not waste time idling.
6d048f53 2752 */
a36e71f9
JA
2753 cfqq = cfqq_close(cfqd, cur_cfqq);
2754 if (!cfqq)
2755 return NULL;
2756
8682e1f1
VG
2757 /* If new queue belongs to different cfq_group, don't choose it */
2758 if (cur_cfqq->cfqg != cfqq->cfqg)
2759 return NULL;
2760
df5fe3e8
JM
2761 /*
2762 * It only makes sense to merge sync queues.
2763 */
2764 if (!cfq_cfqq_sync(cfqq))
2765 return NULL;
e6c5bc73
JM
2766 if (CFQQ_SEEKY(cfqq))
2767 return NULL;
df5fe3e8 2768
c0324a02
CZ
2769 /*
2770 * Do not merge queues of different priority classes
2771 */
2772 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2773 return NULL;
2774
a36e71f9 2775 return cfqq;
6d048f53
JA
2776}
2777
a6d44e98
CZ
2778/*
2779 * Determine whether we should enforce idle window for this queue.
2780 */
2781
2782static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2783{
3bf10fea 2784 enum wl_class_t wl_class = cfqq_class(cfqq);
34b98d03 2785 struct cfq_rb_root *st = cfqq->service_tree;
a6d44e98 2786
34b98d03
VG
2787 BUG_ON(!st);
2788 BUG_ON(!st->count);
f04a6424 2789
b6508c16
VG
2790 if (!cfqd->cfq_slice_idle)
2791 return false;
2792
a6d44e98 2793 /* We never do for idle class queues. */
3bf10fea 2794 if (wl_class == IDLE_WORKLOAD)
a6d44e98
CZ
2795 return false;
2796
2797 /* We do for queues that were marked with idle window flag. */
3c764b7a
SL
2798 if (cfq_cfqq_idle_window(cfqq) &&
2799 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
a6d44e98
CZ
2800 return true;
2801
2802 /*
2803 * Otherwise, we do only if they are the last ones
2804 * in their service tree.
2805 */
34b98d03
VG
2806 if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2807 !cfq_io_thinktime_big(cfqd, &st->ttime, false))
c1e44756 2808 return true;
34b98d03 2809 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
c1e44756 2810 return false;
a6d44e98
CZ
2811}
2812
6d048f53 2813static void cfq_arm_slice_timer(struct cfq_data *cfqd)
22e2c507 2814{
1792669c 2815 struct cfq_queue *cfqq = cfqd->active_queue;
c5869807 2816 struct cfq_io_cq *cic;
80bdf0c7 2817 unsigned long sl, group_idle = 0;
7b14e3b5 2818
a68bbddb 2819 /*
f7d7b7a7
JA
2820 * SSD device without seek penalty, disable idling. But only do so
2821 * for devices that support queuing, otherwise we still have a problem
2822 * with sync vs async workloads.
a68bbddb 2823 */
f7d7b7a7 2824 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
a68bbddb
JA
2825 return;
2826
dd67d051 2827 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
6d048f53 2828 WARN_ON(cfq_cfqq_slice_new(cfqq));
22e2c507
JA
2829
2830 /*
2831 * idle is disabled, either manually or by past process history
2832 */
80bdf0c7
VG
2833 if (!cfq_should_idle(cfqd, cfqq)) {
2834 /* no queue idling. Check for group idling */
2835 if (cfqd->cfq_group_idle)
2836 group_idle = cfqd->cfq_group_idle;
2837 else
2838 return;
2839 }
6d048f53 2840
7b679138 2841 /*
8e550632 2842 * still active requests from this queue, don't idle
7b679138 2843 */
8e550632 2844 if (cfqq->dispatched)
7b679138
JA
2845 return;
2846
22e2c507
JA
2847 /*
2848 * task has exited, don't wait
2849 */
206dc69b 2850 cic = cfqd->active_cic;
f6e8d01b 2851 if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
6d048f53
JA
2852 return;
2853
355b659c
CZ
2854 /*
2855 * If our average think time is larger than the remaining time
2856 * slice, then don't idle. This avoids overrunning the allotted
2857 * time slice.
2858 */
383cd721
SL
2859 if (sample_valid(cic->ttime.ttime_samples) &&
2860 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
fd16d263 2861 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
383cd721 2862 cic->ttime.ttime_mean);
355b659c 2863 return;
b1ffe737 2864 }
355b659c 2865
80bdf0c7
VG
2866 /* There are other queues in the group, don't do group idle */
2867 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2868 return;
2869
3b18152c 2870 cfq_mark_cfqq_wait_request(cfqq);
22e2c507 2871
80bdf0c7
VG
2872 if (group_idle)
2873 sl = cfqd->cfq_group_idle;
2874 else
2875 sl = cfqd->cfq_slice_idle;
206dc69b 2876
7b14e3b5 2877 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
155fead9 2878 cfqg_stats_set_start_idle_time(cfqq->cfqg);
80bdf0c7
VG
2879 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2880 group_idle ? 1 : 0);
1da177e4
LT
2881}
2882
498d3aa2
JA
2883/*
2884 * Move request from internal lists to the request queue dispatch list.
2885 */
165125e1 2886static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1da177e4 2887{
3ed9a296 2888 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 2889 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 2890
7b679138
JA
2891 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2892
06d21886 2893 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
5380a101 2894 cfq_remove_request(rq);
6d048f53 2895 cfqq->dispatched++;
80bdf0c7 2896 (RQ_CFQG(rq))->dispatched++;
5380a101 2897 elv_dispatch_sort(q, rq);
3ed9a296 2898
53c583d2 2899 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
c4e7893e 2900 cfqq->nr_sectors += blk_rq_sectors(rq);
155fead9 2901 cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
1da177e4
LT
2902}
2903
2904/*
2905 * return expired entry, or NULL to just start from scratch in rbtree
2906 */
febffd61 2907static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1da177e4 2908{
30996f40 2909 struct request *rq = NULL;
1da177e4 2910
3b18152c 2911 if (cfq_cfqq_fifo_expire(cfqq))
1da177e4 2912 return NULL;
cb887411
JA
2913
2914 cfq_mark_cfqq_fifo_expire(cfqq);
2915
89850f7e
JA
2916 if (list_empty(&cfqq->fifo))
2917 return NULL;
1da177e4 2918
89850f7e 2919 rq = rq_entry_fifo(cfqq->fifo.next);
8b4922d3 2920 if (time_before(jiffies, rq->fifo_time))
7b679138 2921 rq = NULL;
1da177e4 2922
30996f40 2923 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
6d048f53 2924 return rq;
1da177e4
LT
2925}
2926
22e2c507
JA
2927static inline int
2928cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2929{
2930 const int base_rq = cfqd->cfq_slice_async_rq;
1da177e4 2931
22e2c507 2932 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1da177e4 2933
b9f8ce05 2934 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
1da177e4
LT
2935}
2936
df5fe3e8
JM
2937/*
2938 * Must be called with the queue_lock held.
2939 */
2940static int cfqq_process_refs(struct cfq_queue *cfqq)
2941{
2942 int process_refs, io_refs;
2943
2944 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
30d7b944 2945 process_refs = cfqq->ref - io_refs;
df5fe3e8
JM
2946 BUG_ON(process_refs < 0);
2947 return process_refs;
2948}
2949
2950static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2951{
e6c5bc73 2952 int process_refs, new_process_refs;
df5fe3e8
JM
2953 struct cfq_queue *__cfqq;
2954
c10b61f0
JM
2955 /*
2956 * If there are no process references on the new_cfqq, then it is
2957 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2958 * chain may have dropped their last reference (not just their
2959 * last process reference).
2960 */
2961 if (!cfqq_process_refs(new_cfqq))
2962 return;
2963
df5fe3e8
JM
2964 /* Avoid a circular list and skip interim queue merges */
2965 while ((__cfqq = new_cfqq->new_cfqq)) {
2966 if (__cfqq == cfqq)
2967 return;
2968 new_cfqq = __cfqq;
2969 }
2970
2971 process_refs = cfqq_process_refs(cfqq);
c10b61f0 2972 new_process_refs = cfqq_process_refs(new_cfqq);
df5fe3e8
JM
2973 /*
2974 * If the process for the cfqq has gone away, there is no
2975 * sense in merging the queues.
2976 */
c10b61f0 2977 if (process_refs == 0 || new_process_refs == 0)
df5fe3e8
JM
2978 return;
2979
e6c5bc73
JM
2980 /*
2981 * Merge in the direction of the lesser amount of work.
2982 */
e6c5bc73
JM
2983 if (new_process_refs >= process_refs) {
2984 cfqq->new_cfqq = new_cfqq;
30d7b944 2985 new_cfqq->ref += process_refs;
e6c5bc73
JM
2986 } else {
2987 new_cfqq->new_cfqq = cfqq;
30d7b944 2988 cfqq->ref += new_process_refs;
e6c5bc73 2989 }
df5fe3e8
JM
2990}
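/*
 * Example of the merge direction chosen above (illustrative counts):
 * if cfqq holds 3 process references and new_cfqq holds 5, then
 * new_process_refs >= process_refs, so cfqq->new_cfqq points at
 * new_cfqq and new_cfqq->ref grows by 3; the queue with the lesser
 * amount of work is folded into the busier one.
 */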
2991
6d816ec7 2992static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
3bf10fea 2993 struct cfq_group *cfqg, enum wl_class_t wl_class)
718eee05
CZ
2994{
2995 struct cfq_queue *queue;
2996 int i;
2997 bool key_valid = false;
2998 unsigned long lowest_key = 0;
2999 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
3000
65b32a57
VG
3001 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
3002 /* select the one with lowest rb_key */
34b98d03 3003 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
718eee05
CZ
3004 if (queue &&
3005 (!key_valid || time_before(queue->rb_key, lowest_key))) {
3006 lowest_key = queue->rb_key;
3007 cur_best = i;
3008 key_valid = true;
3009 }
3010 }
3011
3012 return cur_best;
3013}
3014
6d816ec7
VG
3015static void
3016choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
718eee05 3017{
718eee05
CZ
3018 unsigned slice;
3019 unsigned count;
cdb16e8f 3020 struct cfq_rb_root *st;
58ff82f3 3021 unsigned group_slice;
4d2ceea4 3022 enum wl_class_t original_class = cfqd->serving_wl_class;
1fa8f6d6 3023
718eee05 3024 /* Choose next priority. RT > BE > IDLE */
58ff82f3 3025 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
4d2ceea4 3026 cfqd->serving_wl_class = RT_WORKLOAD;
58ff82f3 3027 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
4d2ceea4 3028 cfqd->serving_wl_class = BE_WORKLOAD;
718eee05 3029 else {
4d2ceea4 3030 cfqd->serving_wl_class = IDLE_WORKLOAD;
718eee05
CZ
3031 cfqd->workload_expires = jiffies + 1;
3032 return;
3033 }
3034
4d2ceea4 3035 if (original_class != cfqd->serving_wl_class)
e4ea0c16
SL
3036 goto new_workload;
3037
718eee05
CZ
3038 /*
3039 * For RT and BE, we have to choose also the type
3040 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
3041 * expiration time
3042 */
34b98d03 3043 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
cdb16e8f 3044 count = st->count;
718eee05
CZ
3045
3046 /*
65b32a57 3047 * check workload expiration, and that we still have other queues ready
718eee05 3048 */
65b32a57 3049 if (count && !time_after(jiffies, cfqd->workload_expires))
718eee05
CZ
3050 return;
3051
e4ea0c16 3052new_workload:
718eee05 3053 /* otherwise select new workload type */
6d816ec7 3054 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
4d2ceea4 3055 cfqd->serving_wl_class);
34b98d03 3056 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
cdb16e8f 3057 count = st->count;
718eee05
CZ
3058
3059 /*
3060 * the workload slice is computed as a fraction of target latency
3061 * proportional to the number of queues in that workload, over
3062 * all the queues in the same priority class
3063 */
58ff82f3
VG
3064 group_slice = cfq_group_slice(cfqd, cfqg);
3065
3066 slice = group_slice * count /
4d2ceea4
VG
3067 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3068 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
3bf10fea 3069 cfqg));
718eee05 3070
4d2ceea4 3071 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
f26bd1f0
VG
3072 unsigned int tmp;
3073
3074 /*
3075 * Async queues are currently system wide. Just taking
3076 * the proportion of queues within the same group will lead to a higher
3077 * async ratio system wide as generally the root group is going
3078 * to have a higher weight. A more accurate thing would be to
3079 * calculate the system wide async/sync ratio.
3080 */
5bf14c07
TM
3081 tmp = cfqd->cfq_target_latency *
3082 cfqg_busy_async_queues(cfqd, cfqg);
f26bd1f0
VG
3083 tmp = tmp/cfqd->busy_queues;
3084 slice = min_t(unsigned, slice, tmp);
3085
718eee05
CZ
3086 /* async workload slice is scaled down according to
3087 * the sync/async slice ratio. */
3088 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
f26bd1f0 3089 } else
718eee05
CZ
3090 /* sync workload slice is at least 2 * cfq_slice_idle */
3091 slice = max(slice, 2 * cfqd->cfq_slice_idle);
3092
3093 slice = max_t(unsigned, slice, CFQ_MIN_TT);
b1ffe737 3094 cfq_log(cfqd, "workload slice:%d", slice);
718eee05
CZ
3095 cfqd->workload_expires = jiffies + slice;
3096}
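/*
 * Worked example of the workload slice above (illustrative numbers):
 * group_slice == 300 jiffies, count == 2 queues on the chosen
 * (class, type) tree, and a denominator of 6 (the larger of the
 * class's busy-queue average and its current busy-queue count) give
 * slice = 300 * 2 / 6 = 100 jiffies, before the async/sync adjustments
 * and the CFQ_MIN_TT clamp applied at the end of the function.
 */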
3097
1fa8f6d6
VG
3098static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3099{
3100 struct cfq_rb_root *st = &cfqd->grp_service_tree;
25bc6b07 3101 struct cfq_group *cfqg;
1fa8f6d6
VG
3102
3103 if (RB_EMPTY_ROOT(&st->rb))
3104 return NULL;
25bc6b07 3105 cfqg = cfq_rb_first_group(st);
25bc6b07
VG
3106 update_min_vdisktime(st);
3107 return cfqg;
1fa8f6d6
VG
3108}
3109
cdb16e8f
VG
3110static void cfq_choose_cfqg(struct cfq_data *cfqd)
3111{
1fa8f6d6
VG
3112 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3113
3114 cfqd->serving_group = cfqg;
dae739eb
VG
3115
3116 /* Restore the workload type data */
4d2ceea4
VG
3117 if (cfqg->saved_wl_slice) {
3118 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3119 cfqd->serving_wl_type = cfqg->saved_wl_type;
3120 cfqd->serving_wl_class = cfqg->saved_wl_class;
66ae2919
GJ
3121 } else
3122 cfqd->workload_expires = jiffies - 1;
3123
6d816ec7 3124 choose_wl_class_and_type(cfqd, cfqg);
cdb16e8f
VG
3125}
3126
22e2c507 3127/*
498d3aa2
JA
3128 * Select a queue for service. If we have a current active queue,
3129 * check whether to continue servicing it, or retrieve and set a new one.
22e2c507 3130 */
1b5ed5e1 3131static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1da177e4 3132{
a36e71f9 3133 struct cfq_queue *cfqq, *new_cfqq = NULL;
1da177e4 3134
22e2c507
JA
3135 cfqq = cfqd->active_queue;
3136 if (!cfqq)
3137 goto new_queue;
1da177e4 3138
f04a6424
VG
3139 if (!cfqd->rq_queued)
3140 return NULL;
c244bb50
VG
3141
3142 /*
3143 * We were waiting for the group to get backlogged. Expire the queue.
3144 */
3145 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3146 goto expire;
3147
22e2c507 3148 /*
6d048f53 3149 * The active queue has run out of time, expire it and select new.
22e2c507 3150 */
7667aa06
VG
3151 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3152 /*
3153 * If slice had not expired at the completion of last request
3154 * we might not have turned on wait_busy flag. Don't expire
3155 * the queue yet. Allow the group to get backlogged.
3156 *
3157 * The very fact that we have used the slice means that we
3158 * have been idling all along on this queue and it should be
3159 * ok to wait for this request to complete.
3160 */
82bbbf28
VG
3161 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3162 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3163 cfqq = NULL;
7667aa06 3164 goto keep_queue;
82bbbf28 3165 } else
80bdf0c7 3166 goto check_group_idle;
7667aa06 3167 }
1da177e4 3168
22e2c507 3169 /*
6d048f53
JA
3170 * The active queue has requests and isn't expired, allow it to
3171 * dispatch.
22e2c507 3172 */
dd67d051 3173 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 3174 goto keep_queue;
6d048f53 3175
a36e71f9
JA
3176 /*
3177 * If another queue has a request waiting within our mean seek
3178 * distance, let it run. The expire code will check for close
3179 * cooperators and put the close queue at the front of the service
df5fe3e8 3180 * tree. If possible, merge the expiring queue with the new cfqq.
a36e71f9 3181 */
b3b6d040 3182 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
df5fe3e8
JM
3183 if (new_cfqq) {
3184 if (!cfqq->new_cfqq)
3185 cfq_setup_merge(cfqq, new_cfqq);
a36e71f9 3186 goto expire;
df5fe3e8 3187 }
a36e71f9 3188
6d048f53
JA
3189 /*
3190 * No requests pending. If the active queue still has requests in
3191 * flight or is idling for a new request, allow either of these
3192 * conditions to happen (or time out) before selecting a new queue.
3193 */
80bdf0c7
VG
3194 if (timer_pending(&cfqd->idle_slice_timer)) {
3195 cfqq = NULL;
3196 goto keep_queue;
3197 }
3198
8e1ac665
SL
3199 /*
3200 * This is a deep seek queue, but the device is much faster than
3201 * the queue can deliver, don't idle
3202 **/
3203 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3204 (cfq_cfqq_slice_new(cfqq) ||
3205 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3206 cfq_clear_cfqq_deep(cfqq);
3207 cfq_clear_cfqq_idle_window(cfqq);
3208 }
3209
80bdf0c7
VG
3210 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3211 cfqq = NULL;
3212 goto keep_queue;
3213 }
3214
3215 /*
3216 * If group idle is enabled and there are requests dispatched from
3217 * this group, wait for requests to complete.
3218 */
3219check_group_idle:
7700fc4f
SL
3220 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3221 cfqq->cfqg->dispatched &&
3222 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
caaa5f9f
JA
3223 cfqq = NULL;
3224 goto keep_queue;
22e2c507
JA
3225 }
3226
3b18152c 3227expire:
e5ff082e 3228 cfq_slice_expired(cfqd, 0);
3b18152c 3229new_queue:
718eee05
CZ
3230 /*
3231 * Current queue expired. Check if we have to switch to a new
3232 * service tree
3233 */
3234 if (!new_cfqq)
cdb16e8f 3235 cfq_choose_cfqg(cfqd);
718eee05 3236
a36e71f9 3237 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
22e2c507 3238keep_queue:
3b18152c 3239 return cfqq;
22e2c507
JA
3240}
3241
febffd61 3242static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
d9e7620e
JA
3243{
3244 int dispatched = 0;
3245
3246 while (cfqq->next_rq) {
3247 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3248 dispatched++;
3249 }
3250
3251 BUG_ON(!list_empty(&cfqq->fifo));
f04a6424
VG
3252
3253 /* By default cfqq is not expired if it is empty. Do it explicitly */
e5ff082e 3254 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
d9e7620e
JA
3255 return dispatched;
3256}
3257
498d3aa2
JA
3258/*
3259 * Drain our current requests. Used for barriers and when switching
3260 * io schedulers on-the-fly.
3261 */
d9e7620e 3262static int cfq_forced_dispatch(struct cfq_data *cfqd)
1b5ed5e1 3263{
0871714e 3264 struct cfq_queue *cfqq;
d9e7620e 3265 int dispatched = 0;
cdb16e8f 3266
3440c49f 3267 /* Expire the timeslice of the current active queue first */
e5ff082e 3268 cfq_slice_expired(cfqd, 0);
3440c49f
DS
3269 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3270 __cfq_set_active_queue(cfqd, cfqq);
f04a6424 3271 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3440c49f 3272 }
1b5ed5e1 3273
1b5ed5e1
TH
3274 BUG_ON(cfqd->busy_queues);
3275
6923715a 3276 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1b5ed5e1
TH
3277 return dispatched;
3278}
3279
abc3c744
SL
3280static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3281 struct cfq_queue *cfqq)
3282{
3283 /* the queue hasn't finished any request, can't estimate */
3284 if (cfq_cfqq_slice_new(cfqq))
c1e44756 3285 return true;
abc3c744
SL
3286 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3287 cfqq->slice_end))
c1e44756 3288 return true;
abc3c744 3289
c1e44756 3290 return false;
abc3c744
SL
3291}
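/*
 * Example of the estimate above (illustrative numbers): with
 * cfq_slice_idle == 8 jiffies and 3 requests already dispatched, the
 * slice is considered "used soon" once less than 8 * 3 = 24 jiffies
 * remain before cfqq->slice_end.
 */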
3292
0b182d61 3293static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2f5cb738 3294{
2f5cb738 3295 unsigned int max_dispatch;
22e2c507 3296
5ad531db
JA
3297 /*
3298 * Drain async requests before we start sync IO
3299 */
53c583d2 3300 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
0b182d61 3301 return false;
5ad531db 3302
2f5cb738
JA
3303 /*
3304 * If this is an async queue and we have sync IO in flight, let it wait
3305 */
53c583d2 3306 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
0b182d61 3307 return false;
2f5cb738 3308
abc3c744 3309 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2f5cb738
JA
3310 if (cfq_class_idle(cfqq))
3311 max_dispatch = 1;
b4878f24 3312
2f5cb738
JA
3313 /*
3314 * Does this cfqq already have too much IO in flight?
3315 */
3316 if (cfqq->dispatched >= max_dispatch) {
ef8a41df 3317 bool promote_sync = false;
2f5cb738
JA
3318 /*
3319 * idle queue must always only have a single IO in flight
3320 */
3ed9a296 3321 if (cfq_class_idle(cfqq))
0b182d61 3322 return false;
3ed9a296 3323
ef8a41df 3324 /*
c4ade94f
LS
3325 * If there is only one sync queue
3326 * we can ignore async queue here and give the sync
ef8a41df
SL
3327 * queue no dispatch limit. The reason is that a sync queue can
3328 * preempt an async queue, so limiting the sync queue doesn't
3329 * make sense. This is useful for aiostress testing.
3330 */
c4ade94f
LS
3331 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3332 promote_sync = true;
ef8a41df 3333
2f5cb738
JA
3334 /*
3335 * We have other queues, don't allow more IO from this one
3336 */
ef8a41df
SL
3337 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3338 !promote_sync)
0b182d61 3339 return false;
9ede209e 3340
365722bb 3341 /*
474b18cc 3342 * Sole queue user, no limit
365722bb 3343 */
ef8a41df 3344 if (cfqd->busy_queues == 1 || promote_sync)
abc3c744
SL
3345 max_dispatch = -1;
3346 else
3347 /*
3348 * Normally we start throttling cfqq when cfq_quantum/2
3349 * requests have been dispatched. But we can drive
3350 * deeper queue depths at the beginning of a slice,
3351 * subject to the upper limit of cfq_quantum.
3352 */
3353 max_dispatch = cfqd->cfq_quantum;
8e296755
JA
3354 }
3355
3356 /*
3357 * Async queues must wait a bit before being allowed to dispatch.
3358 * We also ramp up the dispatch depth gradually for async IO,
3359 * based on the last sync IO we serviced
3360 */
963b72fc 3361 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
573412b2 3362 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
8e296755 3363 unsigned int depth;
365722bb 3364
61f0c1dc 3365 depth = last_sync / cfqd->cfq_slice[1];
e00c54c3
JA
3366 if (!depth && !cfqq->dispatched)
3367 depth = 1;
8e296755
JA
3368 if (depth < max_dispatch)
3369 max_dispatch = depth;
2f5cb738 3370 }
3ed9a296 3371
0b182d61
JA
3372 /*
3373 * If we're below the current max, allow a dispatch
3374 */
3375 return cfqq->dispatched < max_dispatch;
3376}
3377
3378/*
3379 * Dispatch a request from cfqq, moving them to the request queue
3380 * dispatch list.
3381 */
3382static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3383{
3384 struct request *rq;
3385
3386 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3387
3388 if (!cfq_may_dispatch(cfqd, cfqq))
3389 return false;
3390
3391 /*
3392 * follow expired path, else get first next available
3393 */
3394 rq = cfq_check_fifo(cfqq);
3395 if (!rq)
3396 rq = cfqq->next_rq;
3397
3398 /*
3399 * insert request into driver dispatch list
3400 */
3401 cfq_dispatch_insert(cfqd->queue, rq);
3402
3403 if (!cfqd->active_cic) {
c5869807 3404 struct cfq_io_cq *cic = RQ_CIC(rq);
0b182d61 3405
c5869807 3406 atomic_long_inc(&cic->icq.ioc->refcount);
0b182d61
JA
3407 cfqd->active_cic = cic;
3408 }
3409
3410 return true;
3411}
3412
3413/*
3414 * Find the cfqq that we need to service and move a request from that to the
3415 * dispatch list
3416 */
3417static int cfq_dispatch_requests(struct request_queue *q, int force)
3418{
3419 struct cfq_data *cfqd = q->elevator->elevator_data;
3420 struct cfq_queue *cfqq;
3421
3422 if (!cfqd->busy_queues)
3423 return 0;
3424
3425 if (unlikely(force))
3426 return cfq_forced_dispatch(cfqd);
3427
3428 cfqq = cfq_select_queue(cfqd);
3429 if (!cfqq)
8e296755
JA
3430 return 0;
3431
2f5cb738 3432 /*
0b182d61 3433 * Dispatch a request from this cfqq, if it is allowed
2f5cb738 3434 */
0b182d61
JA
3435 if (!cfq_dispatch_request(cfqd, cfqq))
3436 return 0;
3437
2f5cb738 3438 cfqq->slice_dispatch++;
b029195d 3439 cfq_clear_cfqq_must_dispatch(cfqq);
22e2c507 3440
2f5cb738
JA
3441 /*
3442 * expire an async queue immediately if it has used up its slice. idle
3443 * queues always expire after 1 dispatch round.
3444 */
3445 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3446 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3447 cfq_class_idle(cfqq))) {
3448 cfqq->slice_end = jiffies + 1;
e5ff082e 3449 cfq_slice_expired(cfqd, 0);
1da177e4
LT
3450 }
3451
b217a903 3452 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2f5cb738 3453 return 1;
1da177e4
LT
3454}
3455
1da177e4 3456/*
5e705374
JA
3457 * task holds one reference to the queue, dropped when task exits. each rq
3458 * in-flight on this queue also holds a reference, dropped when rq is freed.
1da177e4 3459 *
b1c35769 3460 * Each cfq queue took a reference on the parent group. Drop it now.
1da177e4
LT
3461 * queue lock must be held here.
3462 */
3463static void cfq_put_queue(struct cfq_queue *cfqq)
3464{
22e2c507 3465 struct cfq_data *cfqd = cfqq->cfqd;
0bbfeb83 3466 struct cfq_group *cfqg;
22e2c507 3467
30d7b944 3468 BUG_ON(cfqq->ref <= 0);
1da177e4 3469
30d7b944
SL
3470 cfqq->ref--;
3471 if (cfqq->ref)
1da177e4
LT
3472 return;
3473
7b679138 3474 cfq_log_cfqq(cfqd, cfqq, "put_queue");
1da177e4 3475 BUG_ON(rb_first(&cfqq->sort_list));
22e2c507 3476 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
b1c35769 3477 cfqg = cfqq->cfqg;
1da177e4 3478
28f95cbc 3479 if (unlikely(cfqd->active_queue == cfqq)) {
e5ff082e 3480 __cfq_slice_expired(cfqd, cfqq, 0);
23e018a1 3481 cfq_schedule_dispatch(cfqd);
28f95cbc 3482 }
22e2c507 3483
f04a6424 3484 BUG_ON(cfq_cfqq_on_rr(cfqq));
1da177e4 3485 kmem_cache_free(cfq_pool, cfqq);
eb7d8c07 3486 cfqg_put(cfqg);
1da177e4
LT
3487}
3488
d02a2c07 3489static void cfq_put_cooperator(struct cfq_queue *cfqq)
1da177e4 3490{
df5fe3e8
JM
3491 struct cfq_queue *__cfqq, *next;
3492
df5fe3e8
JM
3493 /*
3494 * If this queue was scheduled to merge with another queue, be
3495 * sure to drop the reference taken on that queue (and others in
3496 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
3497 */
3498 __cfqq = cfqq->new_cfqq;
3499 while (__cfqq) {
3500 if (__cfqq == cfqq) {
3501 WARN(1, "cfqq->new_cfqq loop detected\n");
3502 break;
3503 }
3504 next = __cfqq->new_cfqq;
3505 cfq_put_queue(__cfqq);
3506 __cfqq = next;
3507 }
d02a2c07
SL
3508}
3509
3510static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3511{
3512 if (unlikely(cfqq == cfqd->active_queue)) {
3513 __cfq_slice_expired(cfqd, cfqq, 0);
3514 cfq_schedule_dispatch(cfqd);
3515 }
3516
3517 cfq_put_cooperator(cfqq);
df5fe3e8 3518
89850f7e
JA
3519 cfq_put_queue(cfqq);
3520}
22e2c507 3521
9b84cacd
TH
3522static void cfq_init_icq(struct io_cq *icq)
3523{
3524 struct cfq_io_cq *cic = icq_to_cic(icq);
3525
3526 cic->ttime.last_end_request = jiffies;
3527}
3528
c5869807 3529static void cfq_exit_icq(struct io_cq *icq)
89850f7e 3530{
c5869807 3531 struct cfq_io_cq *cic = icq_to_cic(icq);
283287a5 3532 struct cfq_data *cfqd = cic_to_cfqd(cic);
4faa3c81 3533
563180a4
TH
3534 if (cic_to_cfqq(cic, false)) {
3535 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3536 cic_set_cfqq(cic, NULL, false);
12a05732
AV
3537 }
3538
563180a4
TH
3539 if (cic_to_cfqq(cic, true)) {
3540 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3541 cic_set_cfqq(cic, NULL, true);
12a05732 3542 }
89850f7e
JA
3543}
3544
abede6da 3545static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
22e2c507
JA
3546{
3547 struct task_struct *tsk = current;
3548 int ioprio_class;
3549
3b18152c 3550 if (!cfq_cfqq_prio_changed(cfqq))
22e2c507
JA
3551 return;
3552
598971bf 3553 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
22e2c507 3554 switch (ioprio_class) {
fe094d98
JA
3555 default:
3556 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3557 case IOPRIO_CLASS_NONE:
3558 /*
6d63c275 3559 * no prio set, inherit CPU scheduling settings
fe094d98
JA
3560 */
3561 cfqq->ioprio = task_nice_ioprio(tsk);
6d63c275 3562 cfqq->ioprio_class = task_nice_ioclass(tsk);
fe094d98
JA
3563 break;
3564 case IOPRIO_CLASS_RT:
598971bf 3565 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
3566 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3567 break;
3568 case IOPRIO_CLASS_BE:
598971bf 3569 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
3570 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3571 break;
3572 case IOPRIO_CLASS_IDLE:
3573 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3574 cfqq->ioprio = 7;
3575 cfq_clear_cfqq_idle_window(cfqq);
3576 break;
22e2c507
JA
3577 }
3578
3579 /*
3580 * keep track of original prio settings in case we have to temporarily
3581 * elevate the priority of this queue
3582 */
3583 cfqq->org_ioprio = cfqq->ioprio;
3b18152c 3584 cfq_clear_cfqq_prio_changed(cfqq);
22e2c507
JA
3585}
3586
598971bf 3587static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
22e2c507 3588{
598971bf 3589 int ioprio = cic->icq.ioc->ioprio;
bca4b914 3590 struct cfq_data *cfqd = cic_to_cfqd(cic);
478a82b0 3591 struct cfq_queue *cfqq;
35e6077c 3592
598971bf
TH
3593 /*
3594 * Check whether ioprio has changed. The condition may trigger
3595 * spuriously on a newly created cic but there's no harm.
3596 */
3597 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
caaa5f9f
JA
3598 return;
3599
563180a4 3600 cfqq = cic_to_cfqq(cic, false);
caaa5f9f 3601 if (cfqq) {
563180a4 3602 cfq_put_queue(cfqq);
2da8de0b 3603 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
563180a4 3604 cic_set_cfqq(cic, cfqq, false);
22e2c507 3605 }
caaa5f9f 3606
563180a4 3607 cfqq = cic_to_cfqq(cic, true);
caaa5f9f
JA
3608 if (cfqq)
3609 cfq_mark_cfqq_prio_changed(cfqq);
598971bf
TH
3610
3611 cic->ioprio = ioprio;
22e2c507
JA
3612}
3613
d5036d77 3614static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 3615 pid_t pid, bool is_sync)
d5036d77
JA
3616{
3617 RB_CLEAR_NODE(&cfqq->rb_node);
3618 RB_CLEAR_NODE(&cfqq->p_node);
3619 INIT_LIST_HEAD(&cfqq->fifo);
3620
30d7b944 3621 cfqq->ref = 0;
d5036d77
JA
3622 cfqq->cfqd = cfqd;
3623
3624 cfq_mark_cfqq_prio_changed(cfqq);
3625
3626 if (is_sync) {
3627 if (!cfq_class_idle(cfqq))
3628 cfq_mark_cfqq_idle_window(cfqq);
3629 cfq_mark_cfqq_sync(cfqq);
3630 }
3631 cfqq->pid = pid;
3632}
3633
24610333 3634#ifdef CONFIG_CFQ_GROUP_IOSCHED
598971bf 3635static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
24610333 3636{
bca4b914 3637 struct cfq_data *cfqd = cic_to_cfqd(cic);
60a83707 3638 struct cfq_queue *cfqq;
f4da8072 3639 uint64_t serial_nr;
24610333 3640
598971bf 3641 rcu_read_lock();
f4da8072 3642 serial_nr = bio_blkcg(bio)->css.serial_nr;
598971bf 3643 rcu_read_unlock();
24610333 3644
598971bf
TH
3645 /*
3646 * Check whether blkcg has changed. The condition may trigger
3647 * spuriously on a newly created cic but there's no harm.
3648 */
f4da8072 3649 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
598971bf 3650 return;
24610333 3651
60a83707
TH
3652 /*
3653 * Drop reference to queues. New queues will be assigned in new
3654 * group upon arrival of fresh requests.
3655 */
3656 cfqq = cic_to_cfqq(cic, false);
3657 if (cfqq) {
3658 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3659 cic_set_cfqq(cic, NULL, false);
3660 cfq_put_queue(cfqq);
3661 }
3662
3663 cfqq = cic_to_cfqq(cic, true);
3664 if (cfqq) {
3665 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3666 cic_set_cfqq(cic, NULL, true);
3667 cfq_put_queue(cfqq);
24610333 3668 }
598971bf 3669
f4da8072 3670 cic->blkcg_serial_nr = serial_nr;
24610333 3671}
598971bf
TH
3672#else
3673static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
24610333
VG
3674#endif /* CONFIG_CFQ_GROUP_IOSCHED */
3675
c2dea2d1 3676static struct cfq_queue **
60a83707 3677cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
c2dea2d1 3678{
fe094d98 3679 switch (ioprio_class) {
c2dea2d1 3680 case IOPRIO_CLASS_RT:
60a83707 3681 return &cfqg->async_cfqq[0][ioprio];
598971bf
TH
3682 case IOPRIO_CLASS_NONE:
3683 ioprio = IOPRIO_NORM;
3684 /* fall through */
c2dea2d1 3685 case IOPRIO_CLASS_BE:
60a83707 3686 return &cfqg->async_cfqq[1][ioprio];
c2dea2d1 3687 case IOPRIO_CLASS_IDLE:
60a83707 3688 return &cfqg->async_idle_cfqq;
c2dea2d1
VT
3689 default:
3690 BUG();
3691 }
3692}
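/*
 * Explanatory note: async queues are shared per group rather than per
 * task. RT and BE each get one slot per priority level in
 * cfqg->async_cfqq[][], while all idle-class IO shares async_idle_cfqq.
 * The pointer returned here lets the caller reuse an existing queue or
 * install a newly allocated one in that slot.
 */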
3693
15c31be4 3694static struct cfq_queue *
abede6da 3695cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
2da8de0b 3696 struct bio *bio)
15c31be4 3697{
c6ce1943
JM
3698 int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3699 int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
d4aad7ff 3700 struct cfq_queue **async_cfqq = NULL;
4ebc1c61 3701 struct cfq_queue *cfqq;
322731ed
TH
3702 struct cfq_group *cfqg;
3703
3704 rcu_read_lock();
3705 cfqg = cfq_lookup_create_cfqg(cfqd, bio_blkcg(bio));
3706 if (!cfqg) {
3707 cfqq = &cfqd->oom_cfqq;
3708 goto out;
3709 }
15c31be4 3710
c2dea2d1 3711 if (!is_sync) {
c6ce1943
JM
3712 if (!ioprio_valid(cic->ioprio)) {
3713 struct task_struct *tsk = current;
3714 ioprio = task_nice_ioprio(tsk);
3715 ioprio_class = task_nice_ioclass(tsk);
3716 }
60a83707 3717 async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
c2dea2d1 3718 cfqq = *async_cfqq;
4ebc1c61
TH
3719 if (cfqq)
3720 goto out;
c2dea2d1
VT
3721 }
3722
d4aad7ff
TH
3723 cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
3724 cfqd->queue->node);
3725 if (!cfqq) {
3726 cfqq = &cfqd->oom_cfqq;
3727 goto out;
3728 }
3729
3730 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3731 cfq_init_prio_data(cfqq, cic);
3732 cfq_link_cfqq_cfqg(cfqq, cfqg);
3733 cfq_log_cfqq(cfqd, cfqq, "alloced");
15c31be4 3734
d4aad7ff
TH
3735 if (async_cfqq) {
3736 /* a new async queue is created, pin and remember */
30d7b944 3737 cfqq->ref++;
c2dea2d1 3738 *async_cfqq = cfqq;
15c31be4 3739 }
4ebc1c61 3740out:
30d7b944 3741 cfqq->ref++;
322731ed 3742 rcu_read_unlock();
15c31be4
JA
3743 return cfqq;
3744}
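/*
 * Explanatory note on the reference counts above: a queue taken from the
 * shared async slot only gains the caller's reference, while a newly
 * created async queue takes one extra reference to pin it in the group's
 * slot for reuse. Sync requests always get a queue of their own (or fall
 * back to oom_cfqq when allocation or group lookup fails).
 */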
3745
22e2c507 3746static void
383cd721 3747__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
1da177e4 3748{
383cd721
SL
3749 unsigned long elapsed = jiffies - ttime->last_end_request;
3750 elapsed = min(elapsed, 2UL * slice_idle);
db3b5848 3751
383cd721
SL
3752 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3753 ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3754 ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3755}
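/*
 * Explanatory note: the update above is a fixed-point exponentially
 * weighted moving average with a 7/8 decay. ttime_samples converges
 * towards 256, so ttime_mean approximates the recent average gap between
 * the last completion and the next request, clamped to twice the idle
 * window by the min() above.
 */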
3756
3757static void
3758cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 3759 struct cfq_io_cq *cic)
383cd721 3760{
f5f2b6ce 3761 if (cfq_cfqq_sync(cfqq)) {
383cd721 3762 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
f5f2b6ce
SL
3763 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3764 cfqd->cfq_slice_idle);
3765 }
7700fc4f
SL
3766#ifdef CONFIG_CFQ_GROUP_IOSCHED
3767 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3768#endif
22e2c507 3769}
1da177e4 3770
206dc69b 3771static void
b2c18e1e 3772cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
6d048f53 3773 struct request *rq)
206dc69b 3774{
3dde36dd 3775 sector_t sdist = 0;
41647e7a 3776 sector_t n_sec = blk_rq_sectors(rq);
3dde36dd
CZ
3777 if (cfqq->last_request_pos) {
3778 if (cfqq->last_request_pos < blk_rq_pos(rq))
3779 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3780 else
3781 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3782 }
206dc69b 3783
3dde36dd 3784 cfqq->seek_history <<= 1;
41647e7a
CZ
3785 if (blk_queue_nonrot(cfqd->queue))
3786 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3787 else
3788 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
206dc69b 3789}
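/*
 * Explanatory note: seek_history is used as a sliding window, one bit per
 * recent request, recording whether that request looked seeky (a large
 * seek distance, or a small request on a non-rotational queue). Queues
 * with many bits set are later classified as seeky via CFQQ_SEEKY().
 */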
1da177e4 3790
22e2c507
JA
3791/*
3792 * Disable idle window if the process thinks too long or seeks so much that
3793 * it doesn't matter
3794 */
3795static void
3796cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 3797 struct cfq_io_cq *cic)
22e2c507 3798{
7b679138 3799 int old_idle, enable_idle;
1be92f2f 3800
0871714e
JA
3801 /*
3802 * Don't idle for async or idle io prio class
3803 */
3804 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1be92f2f
JA
3805 return;
3806
c265a7f4 3807 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1da177e4 3808
76280aff
CZ
3809 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3810 cfq_mark_cfqq_deep(cfqq);
3811
749ef9f8
CZ
3812 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3813 enable_idle = 0;
f6e8d01b 3814 else if (!atomic_read(&cic->icq.ioc->active_ref) ||
c5869807
TH
3815 !cfqd->cfq_slice_idle ||
3816 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
22e2c507 3817 enable_idle = 0;
383cd721
SL
3818 else if (sample_valid(cic->ttime.ttime_samples)) {
3819 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
22e2c507
JA
3820 enable_idle = 0;
3821 else
3822 enable_idle = 1;
1da177e4
LT
3823 }
3824
7b679138
JA
3825 if (old_idle != enable_idle) {
3826 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3827 if (enable_idle)
3828 cfq_mark_cfqq_idle_window(cfqq);
3829 else
3830 cfq_clear_cfqq_idle_window(cfqq);
3831 }
22e2c507 3832}
1da177e4 3833
22e2c507
JA
3834/*
3835 * Check if new_cfqq should preempt the currently active queue. Return 0 for
3836 * no or if we aren't sure, a 1 will cause a preempt.
3837 */
a6151c3a 3838static bool
22e2c507 3839cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
5e705374 3840 struct request *rq)
22e2c507 3841{
6d048f53 3842 struct cfq_queue *cfqq;
22e2c507 3843
6d048f53
JA
3844 cfqq = cfqd->active_queue;
3845 if (!cfqq)
a6151c3a 3846 return false;
22e2c507 3847
6d048f53 3848 if (cfq_class_idle(new_cfqq))
a6151c3a 3849 return false;
22e2c507
JA
3850
3851 if (cfq_class_idle(cfqq))
a6151c3a 3852 return true;
1e3335de 3853
875feb63
DS
3854 /*
3855 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3856 */
3857 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3858 return false;
3859
374f84ac
JA
3860 /*
3861 * if the new request is sync, but the currently running queue is
3862 * not, let the sync request have priority.
3863 */
5e705374 3864 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
a6151c3a 3865 return true;
1e3335de 3866
8682e1f1
VG
3867 if (new_cfqq->cfqg != cfqq->cfqg)
3868 return false;
3869
3870 if (cfq_slice_used(cfqq))
3871 return true;
3872
3873 /* Allow preemption only if we are idling on sync-noidle tree */
4d2ceea4 3874 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
8682e1f1
VG
3875 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3876 new_cfqq->service_tree->count == 2 &&
3877 RB_EMPTY_ROOT(&cfqq->sort_list))
3878 return true;
3879
b53d1ed7
JA
3880 /*
3881 * So both queues are sync. Let the new request get disk time if
3882 * it's a metadata request and the current queue is doing regular IO.
3883 */
65299a3b 3884 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
b53d1ed7
JA
3885 return true;
3886
3a9a3f6c
DS
3887 /*
3888 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3889 */
3890 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
a6151c3a 3891 return true;
3a9a3f6c 3892
d2d59e18
SL
3893 /* An idle queue should not be idle now for some reason */
3894 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3895 return true;
3896
1e3335de 3897 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
a6151c3a 3898 return false;
1e3335de
JA
3899
3900 /*
3901 * if this request is as-good as one we would expect from the
3902 * current cfqq, let it preempt
3903 */
e9ce335d 3904 if (cfq_rq_close(cfqd, cfqq, rq))
a6151c3a 3905 return true;
1e3335de 3906
a6151c3a 3907 return false;
22e2c507
JA
3908}
3909
3910/*
3911 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3912 * let it have half of its nominal slice.
3913 */
3914static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3915{
df0793ab
SL
3916 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3917
7b679138 3918 cfq_log_cfqq(cfqd, cfqq, "preempt");
df0793ab 3919 cfq_slice_expired(cfqd, 1);
22e2c507 3920
f8ae6e3e
SL
3921 /*
3922 * workload type is changed, don't save slice, otherwise preempt
3923 * doesn't happen
3924 */
df0793ab 3925 if (old_type != cfqq_type(cfqq))
4d2ceea4 3926 cfqq->cfqg->saved_wl_slice = 0;
f8ae6e3e 3927
bf572256
JA
3928 /*
3929 * Put the new queue at the front of the current list,
3930 * so we know that it will be selected next.
3931 */
3932 BUG_ON(!cfq_cfqq_on_rr(cfqq));
edd75ffd
JA
3933
3934 cfq_service_tree_add(cfqd, cfqq, 1);
eda5e0c9 3935
62a37f6b
JT
3936 cfqq->slice_end = 0;
3937 cfq_mark_cfqq_slice_new(cfqq);
22e2c507
JA
3938}
3939
22e2c507 3940/*
5e705374 3941 * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507
JA
3942 * something we should do about it
3943 */
3944static void
5e705374
JA
3945cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3946 struct request *rq)
22e2c507 3947{
c5869807 3948 struct cfq_io_cq *cic = RQ_CIC(rq);
12e9fddd 3949
45333d5a 3950 cfqd->rq_queued++;
65299a3b
CH
3951 if (rq->cmd_flags & REQ_PRIO)
3952 cfqq->prio_pending++;
374f84ac 3953
383cd721 3954 cfq_update_io_thinktime(cfqd, cfqq, cic);
b2c18e1e 3955 cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a1
JA
3956 cfq_update_idle_window(cfqd, cfqq, cic);
3957
b2c18e1e 3958 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507
JA
3959
3960 if (cfqq == cfqd->active_queue) {
3961 /*
b029195d
JA
3962 * Remember that we saw a request from this process, but
3963 * don't start queuing just yet. Otherwise we risk seeing lots
3964 * of tiny requests, because we disrupt the normal plugging
d6ceb25e
JA
3965 * and merging. If the request is already larger than a single
3966 * page, let it rip immediately. For that case we assume that
2d870722
JA
3967 * merging is already done. Ditto for a busy system that
3968 * has other work pending: don't risk delaying until the
3969 * idle timer unplugs to continue working.
22e2c507 3970 */
d6ceb25e 3971 if (cfq_cfqq_wait_request(cfqq)) {
2d870722
JA
3972 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3973 cfqd->busy_queues > 1) {
812df48d 3974 cfq_del_timer(cfqd, cfqq);
554554f6 3975 cfq_clear_cfqq_wait_request(cfqq);
24ecfbe2 3976 __blk_run_queue(cfqd->queue);
a11cdaa7 3977 } else {
155fead9 3978 cfqg_stats_update_idle_time(cfqq->cfqg);
bf791937 3979 cfq_mark_cfqq_must_dispatch(cfqq);
a11cdaa7 3980 }
d6ceb25e 3981 }
5e705374 3982 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507
JA
3983 /*
3984 * not the active queue - expire current slice if it is
3985 * idle and has expired its mean thinktime or this new queue
3a9a3f6c
DS
3986 * has some old slice time left and is of higher priority or
3987 * this new queue is RT and the current one is BE
22e2c507
JA
3988 */
3989 cfq_preempt_queue(cfqd, cfqq);
24ecfbe2 3990 __blk_run_queue(cfqd->queue);
22e2c507 3991 }
1da177e4
LT
3992}
3993
165125e1 3994static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4 3995{
b4878f24 3996 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 3997 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 3998
7b679138 3999 cfq_log_cfqq(cfqd, cfqq, "insert_request");
abede6da 4000 cfq_init_prio_data(cfqq, RQ_CIC(rq));
1da177e4 4001
8b4922d3 4002 rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
22e2c507 4003 list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3d 4004 cfq_add_rq_rb(rq);
155fead9
TH
4005 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
4006 rq->cmd_flags);
5e705374 4007 cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4
LT
4008}
4009
45333d5a
AC
4010/*
4011 * Update hw_tag based on peak queue depth over 50 samples under
4012 * sufficient load.
4013 */
4014static void cfq_update_hw_tag(struct cfq_data *cfqd)
4015{
1a1238a7
SL
4016 struct cfq_queue *cfqq = cfqd->active_queue;
4017
53c583d2
CZ
4018 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4019 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
e459dd08
CZ
4020
4021 if (cfqd->hw_tag == 1)
4022 return;
45333d5a
AC
4023
4024 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
53c583d2 4025 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
45333d5a
AC
4026 return;
4027
1a1238a7
SL
4028 /*
4029 * If the active queue doesn't have enough requests and can idle, cfq
4030 * might not dispatch sufficient requests to hardware. Don't zero hw_tag
4031 * in this case.
4032 */
4033 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4034 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
53c583d2 4035 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
1a1238a7
SL
4036 return;
4037
45333d5a
AC
4038 if (cfqd->hw_tag_samples++ < 50)
4039 return;
4040
e459dd08 4041 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
45333d5a
AC
4042 cfqd->hw_tag = 1;
4043 else
4044 cfqd->hw_tag = 0;
45333d5a
AC
4045}
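/*
 * Explanatory note: hw_tag is inferred rather than queried from the
 * device. The peak number of requests in the driver is observed over
 * roughly 50 samples taken under sufficient load; if the device ever
 * sustains CFQ_HW_QUEUE_MIN or more requests in flight it is treated as
 * a command-queueing device, otherwise as non-queueing.
 */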
4046
7667aa06
VG
4047static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4048{
c5869807 4049 struct cfq_io_cq *cic = cfqd->active_cic;
7667aa06 4050
02a8f01b
JT
4051 /* If the queue already has requests, don't wait */
4052 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4053 return false;
4054
7667aa06
VG
4055 /* If there are other queues in the group, don't wait */
4056 if (cfqq->cfqg->nr_cfqq > 1)
4057 return false;
4058
7700fc4f
SL
4059 /* the only queue in the group, but think time is big */
4060 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4061 return false;
4062
7667aa06
VG
4063 if (cfq_slice_used(cfqq))
4064 return true;
4065
4066 /* if slice left is less than think time, wait busy */
383cd721
SL
4067 if (cic && sample_valid(cic->ttime.ttime_samples)
4068 && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
7667aa06
VG
4069 return true;
4070
4071 /*
4072 * If think time is less than a jiffy then ttime_mean=0 and the above
4073 * will not be true. It might happen that the slice has not expired yet
4074 * but will expire soon (4-5 ns) during select_queue(). To cover the
4075 * case where think time is less than a jiffy, mark the queue wait
4076 * busy if only 1 jiffy is left in the slice.
4077 */
4078 if (cfqq->slice_end - jiffies == 1)
4079 return true;
4080
4081 return false;
4082}
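/*
 * Explanatory note: taken together, the checks above hold a queue "wait
 * busy" at the end of its slice only when it is the sole queue of its
 * group, currently empty, not too think-time heavy, and its slice is
 * used up or about to be, so that an imminent next request is not lost
 * to an immediate expiry.
 */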
4083
165125e1 4084static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4 4085{
5e705374 4086 struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f24 4087 struct cfq_data *cfqd = cfqq->cfqd;
5380a101 4088 const int sync = rq_is_sync(rq);
b4878f24 4089 unsigned long now;
1da177e4 4090
b4878f24 4091 now = jiffies;
33659ebb
CH
4092 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4093 !!(rq->cmd_flags & REQ_NOIDLE));
1da177e4 4094
45333d5a
AC
4095 cfq_update_hw_tag(cfqd);
4096
53c583d2 4097 WARN_ON(!cfqd->rq_in_driver);
6d048f53 4098 WARN_ON(!cfqq->dispatched);
53c583d2 4099 cfqd->rq_in_driver--;
6d048f53 4100 cfqq->dispatched--;
80bdf0c7 4101 (RQ_CFQG(rq))->dispatched--;
155fead9
TH
4102 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4103 rq_io_start_time_ns(rq), rq->cmd_flags);
1da177e4 4104
53c583d2 4105 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3ed9a296 4106
365722bb 4107 if (sync) {
34b98d03 4108 struct cfq_rb_root *st;
f5f2b6ce 4109
383cd721 4110 RQ_CIC(rq)->ttime.last_end_request = now;
f5f2b6ce
SL
4111
4112 if (cfq_cfqq_on_rr(cfqq))
34b98d03 4113 st = cfqq->service_tree;
f5f2b6ce 4114 else
34b98d03
VG
4115 st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4116 cfqq_type(cfqq));
4117
4118 st->ttime.last_end_request = now;
573412b2
CZ
4119 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4120 cfqd->last_delayed_sync = now;
365722bb 4121 }
caaa5f9f 4122
7700fc4f
SL
4123#ifdef CONFIG_CFQ_GROUP_IOSCHED
4124 cfqq->cfqg->ttime.last_end_request = now;
4125#endif
4126
caaa5f9f
JA
4127 /*
4128 * If this is the active queue, check if it needs to be expired,
4129 * or if we want to idle in case it has no pending requests.
4130 */
4131 if (cfqd->active_queue == cfqq) {
a36e71f9
JA
4132 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4133
44f7c160
JA
4134 if (cfq_cfqq_slice_new(cfqq)) {
4135 cfq_set_prio_slice(cfqd, cfqq);
4136 cfq_clear_cfqq_slice_new(cfqq);
4137 }
f75edf2d
VG
4138
4139 /*
7667aa06
VG
4140 * Should we wait for next request to come in before we expire
4141 * the queue.
f75edf2d 4142 */
7667aa06 4143 if (cfq_should_wait_busy(cfqd, cfqq)) {
80bdf0c7
VG
4144 unsigned long extend_sl = cfqd->cfq_slice_idle;
4145 if (!cfqd->cfq_slice_idle)
4146 extend_sl = cfqd->cfq_group_idle;
4147 cfqq->slice_end = jiffies + extend_sl;
f75edf2d 4148 cfq_mark_cfqq_wait_busy(cfqq);
b1ffe737 4149 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
f75edf2d
VG
4150 }
4151
a36e71f9 4152 /*
8e550632
CZ
4153 * Idling is not enabled on:
4154 * - expired queues
4155 * - idle-priority queues
4156 * - async queues
4157 * - queues with still some requests queued
4158 * - when there is a close cooperator
a36e71f9 4159 */
0871714e 4160 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
e5ff082e 4161 cfq_slice_expired(cfqd, 1);
8e550632
CZ
4162 else if (sync && cfqq_empty &&
4163 !cfq_close_cooperator(cfqd, cfqq)) {
749ef9f8 4164 cfq_arm_slice_timer(cfqd);
8e550632 4165 }
caaa5f9f 4166 }
6d048f53 4167
53c583d2 4168 if (!cfqd->rq_in_driver)
23e018a1 4169 cfq_schedule_dispatch(cfqd);
1da177e4
LT
4170}
4171
89850f7e 4172static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507 4173{
1b379d8d 4174 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c 4175 cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507 4176 return ELV_MQUEUE_MUST;
3b18152c 4177 }
1da177e4 4178
22e2c507 4179 return ELV_MQUEUE_MAY;
22e2c507
JA
4180}
4181
165125e1 4182static int cfq_may_queue(struct request_queue *q, int rw)
22e2c507
JA
4183{
4184 struct cfq_data *cfqd = q->elevator->elevator_data;
4185 struct task_struct *tsk = current;
c5869807 4186 struct cfq_io_cq *cic;
22e2c507
JA
4187 struct cfq_queue *cfqq;
4188
4189 /*
4190 * don't force setup of a queue from here, as a call to may_queue
4191 * does not necessarily imply that a request actually will be queued.
4192 * so just lookup a possibly existing queue, or return 'may queue'
4193 * if that fails
4194 */
4ac845a2 4195 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
4196 if (!cic)
4197 return ELV_MQUEUE_MAY;
4198
b0b78f81 4199 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
22e2c507 4200 if (cfqq) {
abede6da 4201 cfq_init_prio_data(cfqq, cic);
22e2c507 4202
89850f7e 4203 return __cfq_may_queue(cfqq);
22e2c507
JA
4204 }
4205
4206 return ELV_MQUEUE_MAY;
1da177e4
LT
4207}
4208
1da177e4
LT
4209/*
4210 * queue lock held here
4211 */
bb37b94c 4212static void cfq_put_request(struct request *rq)
1da177e4 4213{
5e705374 4214 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 4215
5e705374 4216 if (cfqq) {
22e2c507 4217 const int rw = rq_data_dir(rq);
1da177e4 4218
22e2c507
JA
4219 BUG_ON(!cfqq->allocated[rw]);
4220 cfqq->allocated[rw]--;
1da177e4 4221
7f1dc8a2 4222 /* Put down rq reference on cfqg */
eb7d8c07 4223 cfqg_put(RQ_CFQG(rq));
a612fddf
TH
4224 rq->elv.priv[0] = NULL;
4225 rq->elv.priv[1] = NULL;
7f1dc8a2 4226
1da177e4
LT
4227 cfq_put_queue(cfqq);
4228 }
4229}
4230
df5fe3e8 4231static struct cfq_queue *
c5869807 4232cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
df5fe3e8
JM
4233 struct cfq_queue *cfqq)
4234{
4235 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4236 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d040 4237 cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8
JM
4238 cfq_put_queue(cfqq);
4239 return cic_to_cfqq(cic, 1);
4240}
4241
e6c5bc73
JM
4242/*
4243 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4244 * was the last process referring to said cfqq.
4245 */
4246static struct cfq_queue *
c5869807 4247split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
e6c5bc73
JM
4248{
4249 if (cfqq_process_refs(cfqq) == 1) {
e6c5bc73
JM
4250 cfqq->pid = current->pid;
4251 cfq_clear_cfqq_coop(cfqq);
ae54abed 4252 cfq_clear_cfqq_split_coop(cfqq);
e6c5bc73
JM
4253 return cfqq;
4254 }
4255
4256 cic_set_cfqq(cic, NULL, 1);
d02a2c07
SL
4257
4258 cfq_put_cooperator(cfqq);
4259
e6c5bc73
JM
4260 cfq_put_queue(cfqq);
4261 return NULL;
4262}
1da177e4 4263/*
22e2c507 4264 * Allocate cfq data structures associated with this request.
1da177e4 4265 */
22e2c507 4266static int
852c788f
TH
4267cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4268 gfp_t gfp_mask)
1da177e4
LT
4269{
4270 struct cfq_data *cfqd = q->elevator->elevator_data;
f1f8cc94 4271 struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
1da177e4 4272 const int rw = rq_data_dir(rq);
a6151c3a 4273 const bool is_sync = rq_is_sync(rq);
22e2c507 4274 struct cfq_queue *cfqq;
1da177e4 4275
216284c3 4276 spin_lock_irq(q->queue_lock);
f1f8cc94 4277
598971bf
TH
4278 check_ioprio_changed(cic, bio);
4279 check_blkcg_changed(cic, bio);
e6c5bc73 4280new_queue:
91fac317 4281 cfqq = cic_to_cfqq(cic, is_sync);
32f2e807 4282 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
bce6133b
TH
4283 if (cfqq)
4284 cfq_put_queue(cfqq);
2da8de0b 4285 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
91fac317 4286 cic_set_cfqq(cic, cfqq, is_sync);
df5fe3e8 4287 } else {
e6c5bc73
JM
4288 /*
4289 * If the queue was seeky for too long, break it apart.
4290 */
ae54abed 4291 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
e6c5bc73
JM
4292 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4293 cfqq = split_cfqq(cic, cfqq);
4294 if (!cfqq)
4295 goto new_queue;
4296 }
4297
df5fe3e8
JM
4298 /*
4299 * Check to see if this queue is scheduled to merge with
4300 * another, closely cooperating queue. The merging of
4301 * queues happens here as it must be done in process context.
4302 * The reference on new_cfqq was taken in merge_cfqqs.
4303 */
4304 if (cfqq->new_cfqq)
4305 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
91fac317 4306 }
1da177e4
LT
4307
4308 cfqq->allocated[rw]++;
1da177e4 4309
6fae9c25 4310 cfqq->ref++;
eb7d8c07 4311 cfqg_get(cfqq->cfqg);
a612fddf 4312 rq->elv.priv[0] = cfqq;
1adaf3dd 4313 rq->elv.priv[1] = cfqq->cfqg;
216284c3 4314 spin_unlock_irq(q->queue_lock);
5e705374 4315 return 0;
1da177e4
LT
4316}
4317
65f27f38 4318static void cfq_kick_queue(struct work_struct *work)
22e2c507 4319{
65f27f38 4320 struct cfq_data *cfqd =
23e018a1 4321 container_of(work, struct cfq_data, unplug_work);
165125e1 4322 struct request_queue *q = cfqd->queue;
22e2c507 4323
40bb54d1 4324 spin_lock_irq(q->queue_lock);
24ecfbe2 4325 __blk_run_queue(cfqd->queue);
40bb54d1 4326 spin_unlock_irq(q->queue_lock);
22e2c507
JA
4327}
4328
4329/*
4330 * Timer running if the active_queue is currently idling inside its time slice
4331 */
4332static void cfq_idle_slice_timer(unsigned long data)
4333{
4334 struct cfq_data *cfqd = (struct cfq_data *) data;
4335 struct cfq_queue *cfqq;
4336 unsigned long flags;
3c6bd2f8 4337 int timed_out = 1;
22e2c507 4338
7b679138
JA
4339 cfq_log(cfqd, "idle timer fired");
4340
22e2c507
JA
4341 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4342
fe094d98
JA
4343 cfqq = cfqd->active_queue;
4344 if (cfqq) {
3c6bd2f8
JA
4345 timed_out = 0;
4346
b029195d
JA
4347 /*
4348 * We saw a request before the queue expired, let it through
4349 */
4350 if (cfq_cfqq_must_dispatch(cfqq))
4351 goto out_kick;
4352
22e2c507
JA
4353 /*
4354 * expired
4355 */
44f7c160 4356 if (cfq_slice_used(cfqq))
22e2c507
JA
4357 goto expire;
4358
4359 /*
4360 * only expire and reinvoke request handler, if there are
4361 * other queues with pending requests
4362 */
caaa5f9f 4363 if (!cfqd->busy_queues)
22e2c507 4364 goto out_cont;
22e2c507
JA
4365
4366 /*
4367 * not expired and it has a request pending, let it dispatch
4368 */
75e50984 4369 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 4370 goto out_kick;
76280aff
CZ
4371
4372 /*
4373 * Queue depth flag is reset only when the idle didn't succeed
4374 */
4375 cfq_clear_cfqq_deep(cfqq);
22e2c507
JA
4376 }
4377expire:
e5ff082e 4378 cfq_slice_expired(cfqd, timed_out);
22e2c507 4379out_kick:
23e018a1 4380 cfq_schedule_dispatch(cfqd);
22e2c507
JA
4381out_cont:
4382 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4383}
4384
3b18152c
JA
4385static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4386{
4387 del_timer_sync(&cfqd->idle_slice_timer);
23e018a1 4388 cancel_work_sync(&cfqd->unplug_work);
3b18152c 4389}
22e2c507 4390
b374d18a 4391static void cfq_exit_queue(struct elevator_queue *e)
1da177e4 4392{
22e2c507 4393 struct cfq_data *cfqd = e->elevator_data;
165125e1 4394 struct request_queue *q = cfqd->queue;
22e2c507 4395
3b18152c 4396 cfq_shutdown_timer_wq(cfqd);
e2d74ac0 4397
d9ff4187 4398 spin_lock_irq(q->queue_lock);
e2d74ac0 4399
d9ff4187 4400 if (cfqd->active_queue)
e5ff082e 4401 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
e2d74ac0 4402
03aa264a
TH
4403 spin_unlock_irq(q->queue_lock);
4404
a90d742e
AV
4405 cfq_shutdown_timer_wq(cfqd);
4406
ffea73fc
TH
4407#ifdef CONFIG_CFQ_GROUP_IOSCHED
4408 blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4409#else
f51b802c 4410 kfree(cfqd->root_group);
2abae55f 4411#endif
56edf7d7 4412 kfree(cfqd);
1da177e4
LT
4413}
4414
d50235b7 4415static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
1da177e4
LT
4416{
4417 struct cfq_data *cfqd;
3c798398 4418 struct blkcg_gq *blkg __maybe_unused;
a2b1693b 4419 int i, ret;
d50235b7
JM
4420 struct elevator_queue *eq;
4421
4422 eq = elevator_alloc(q, e);
4423 if (!eq)
4424 return -ENOMEM;
1da177e4 4425
c1b511eb 4426 cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
d50235b7
JM
4427 if (!cfqd) {
4428 kobject_put(&eq->kobj);
b2fab5ac 4429 return -ENOMEM;
d50235b7
JM
4430 }
4431 eq->elevator_data = cfqd;
80b15c73 4432
f51b802c 4433 cfqd->queue = q;
d50235b7
JM
4434 spin_lock_irq(q->queue_lock);
4435 q->elevator = eq;
4436 spin_unlock_irq(q->queue_lock);
f51b802c 4437
1fa8f6d6
VG
4438 /* Init root service tree */
4439 cfqd->grp_service_tree = CFQ_RB_ROOT;
4440
f51b802c 4441 /* Init root group and prefer root group over other groups by default */
25fb5169 4442#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4443 ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
a2b1693b
TH
4444 if (ret)
4445 goto out_free;
f51b802c 4446
a2b1693b 4447 cfqd->root_group = blkg_to_cfqg(q->root_blkg);
f51b802c 4448#else
a2b1693b 4449 ret = -ENOMEM;
f51b802c
TH
4450 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4451 GFP_KERNEL, cfqd->queue->node);
a2b1693b
TH
4452 if (!cfqd->root_group)
4453 goto out_free;
5624a4e4 4454
a2b1693b
TH
4455 cfq_init_cfqg_base(cfqd->root_group);
4456#endif
3381cb8d 4457 cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
e71357e1 4458 cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
5624a4e4 4459
26a2ac00
JA
4460 /*
4461 * Not strictly needed (since RB_ROOT just clears the node and we
4462 * zeroed cfqd on alloc), but better be safe in case someone decides
4463 * to add magic to the rb code
4464 */
4465 for (i = 0; i < CFQ_PRIO_LISTS; i++)
4466 cfqd->prio_trees[i] = RB_ROOT;
4467
6118b70b 4468 /*
d4aad7ff 4469 * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
6118b70b 4470 * Grab a permanent reference to it, so that the normal code flow
f51b802c
TH
4471 * will not attempt to free it. oom_cfqq is linked to root_group
4472 * but shouldn't hold a reference as it'll never be unlinked. Lose
4473 * the reference from linking right away.
6118b70b
JA
4474 */
4475 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
30d7b944 4476 cfqd->oom_cfqq.ref++;
1adaf3dd
TH
4477
4478 spin_lock_irq(q->queue_lock);
f51b802c 4479 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
eb7d8c07 4480 cfqg_put(cfqd->root_group);
1adaf3dd 4481 spin_unlock_irq(q->queue_lock);
1da177e4 4482
22e2c507
JA
4483 init_timer(&cfqd->idle_slice_timer);
4484 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4485 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4486
23e018a1 4487 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
22e2c507 4488
1da177e4 4489 cfqd->cfq_quantum = cfq_quantum;
22e2c507
JA
4490 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4491 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
1da177e4
LT
4492 cfqd->cfq_back_max = cfq_back_max;
4493 cfqd->cfq_back_penalty = cfq_back_penalty;
22e2c507
JA
4494 cfqd->cfq_slice[0] = cfq_slice_async;
4495 cfqd->cfq_slice[1] = cfq_slice_sync;
5bf14c07 4496 cfqd->cfq_target_latency = cfq_target_latency;
22e2c507 4497 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
0bb97947 4498 cfqd->cfq_slice_idle = cfq_slice_idle;
80bdf0c7 4499 cfqd->cfq_group_idle = cfq_group_idle;
963b72fc 4500 cfqd->cfq_latency = 1;
e459dd08 4501 cfqd->hw_tag = -1;
edc71131
CZ
4502 /*
4503 * we optimistically start assuming sync ops weren't delayed in the last
4504 * second, in order to have larger depth for async operations.
4505 */
573412b2 4506 cfqd->last_delayed_sync = jiffies - HZ;
b2fab5ac 4507 return 0;
a2b1693b
TH
4508
4509out_free:
4510 kfree(cfqd);
d50235b7 4511 kobject_put(&eq->kobj);
a2b1693b 4512 return ret;
1da177e4
LT
4513}
4514
0bb97947
JA
4515static void cfq_registered_queue(struct request_queue *q)
4516{
4517 struct elevator_queue *e = q->elevator;
4518 struct cfq_data *cfqd = e->elevator_data;
4519
4520 /*
4521 * Default to IOPS mode with no idling for SSDs
4522 */
4523 if (blk_queue_nonrot(q))
4524 cfqd->cfq_slice_idle = 0;
4525}
4526
1da177e4
LT
4527/*
4528 * sysfs parts below -->
4529 */
1da177e4
LT
4530static ssize_t
4531cfq_var_show(unsigned int var, char *page)
4532{
176167ad 4533 return sprintf(page, "%u\n", var);
1da177e4
LT
4534}
4535
4536static ssize_t
4537cfq_var_store(unsigned int *var, const char *page, size_t count)
4538{
4539 char *p = (char *) page;
4540
4541 *var = simple_strtoul(p, &p, 10);
4542 return count;
4543}
4544
1da177e4 4545#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
b374d18a 4546static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1da177e4 4547{ \
3d1ab40f 4548 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
4549 unsigned int __data = __VAR; \
4550 if (__CONV) \
4551 __data = jiffies_to_msecs(__data); \
4552 return cfq_var_show(__data, (page)); \
4553}
4554SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
22e2c507
JA
4555SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4556SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
e572ec7e
AV
4557SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4558SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
22e2c507 4559SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
80bdf0c7 4560SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
22e2c507
JA
4561SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4562SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4563SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
963b72fc 4564SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
5bf14c07 4565SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
1da177e4
LT
4566#undef SHOW_FUNCTION
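/*
 * For reference (sketch of the macro expansion, not original source):
 * SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0) generates,
 * roughly,
 *
 *	static ssize_t cfq_quantum_show(struct elevator_queue *e, char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_quantum;
 *		return cfq_var_show(__data, (page));
 *	}
 *
 * with the jiffies_to_msecs() conversion compiled out because __CONV is 0
 * for this tunable. STORE_FUNCTION below follows the same pattern with
 * clamping and an optional msecs_to_jiffies() conversion.
 */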
4567
4568#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
b374d18a 4569static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
1da177e4 4570{ \
3d1ab40f 4571 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
4572 unsigned int __data; \
4573 int ret = cfq_var_store(&__data, (page), count); \
4574 if (__data < (MIN)) \
4575 __data = (MIN); \
4576 else if (__data > (MAX)) \
4577 __data = (MAX); \
4578 if (__CONV) \
4579 *(__PTR) = msecs_to_jiffies(__data); \
4580 else \
4581 *(__PTR) = __data; \
4582 return ret; \
4583}
4584STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
fe094d98
JA
4585STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4586 UINT_MAX, 1);
4587STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4588 UINT_MAX, 1);
e572ec7e 4589STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
fe094d98
JA
4590STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4591 UINT_MAX, 0);
22e2c507 4592STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
80bdf0c7 4593STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
22e2c507
JA
4594STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4595STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
fe094d98
JA
4596STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4597 UINT_MAX, 0);
963b72fc 4598STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
5bf14c07 4599STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
1da177e4
LT
4600#undef STORE_FUNCTION
4601
e572ec7e
AV
4602#define CFQ_ATTR(name) \
4603 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4604
4605static struct elv_fs_entry cfq_attrs[] = {
4606 CFQ_ATTR(quantum),
e572ec7e
AV
4607 CFQ_ATTR(fifo_expire_sync),
4608 CFQ_ATTR(fifo_expire_async),
4609 CFQ_ATTR(back_seek_max),
4610 CFQ_ATTR(back_seek_penalty),
4611 CFQ_ATTR(slice_sync),
4612 CFQ_ATTR(slice_async),
4613 CFQ_ATTR(slice_async_rq),
4614 CFQ_ATTR(slice_idle),
80bdf0c7 4615 CFQ_ATTR(group_idle),
963b72fc 4616 CFQ_ATTR(low_latency),
5bf14c07 4617 CFQ_ATTR(target_latency),
e572ec7e 4618 __ATTR_NULL
1da177e4
LT
4619};
4620
1da177e4
LT
4621static struct elevator_type iosched_cfq = {
4622 .ops = {
4623 .elevator_merge_fn = cfq_merge,
4624 .elevator_merged_fn = cfq_merged_request,
4625 .elevator_merge_req_fn = cfq_merged_requests,
da775265 4626 .elevator_allow_merge_fn = cfq_allow_merge,
812d4026 4627 .elevator_bio_merged_fn = cfq_bio_merged,
b4878f24 4628 .elevator_dispatch_fn = cfq_dispatch_requests,
1da177e4 4629 .elevator_add_req_fn = cfq_insert_request,
b4878f24 4630 .elevator_activate_req_fn = cfq_activate_request,
1da177e4 4631 .elevator_deactivate_req_fn = cfq_deactivate_request,
1da177e4 4632 .elevator_completed_req_fn = cfq_completed_request,
21183b07
JA
4633 .elevator_former_req_fn = elv_rb_former_request,
4634 .elevator_latter_req_fn = elv_rb_latter_request,
9b84cacd 4635 .elevator_init_icq_fn = cfq_init_icq,
7e5a8794 4636 .elevator_exit_icq_fn = cfq_exit_icq,
1da177e4
LT
4637 .elevator_set_req_fn = cfq_set_request,
4638 .elevator_put_req_fn = cfq_put_request,
4639 .elevator_may_queue_fn = cfq_may_queue,
4640 .elevator_init_fn = cfq_init_queue,
4641 .elevator_exit_fn = cfq_exit_queue,
0bb97947 4642 .elevator_registered_fn = cfq_registered_queue,
1da177e4 4643 },
3d3c2379
TH
4644 .icq_size = sizeof(struct cfq_io_cq),
4645 .icq_align = __alignof__(struct cfq_io_cq),
3d1ab40f 4646 .elevator_attrs = cfq_attrs,
3d3c2379 4647 .elevator_name = "cfq",
1da177e4
LT
4648 .elevator_owner = THIS_MODULE,
4649};
4650
3e252066 4651#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4652static struct blkcg_policy blkcg_policy_cfq = {
e48453c3 4653 .cpd_size = sizeof(struct cfq_group_data),
f9fcc2d3
TH
4654 .cftypes = cfq_blkcg_files,
4655
e48453c3 4656 .cpd_init_fn = cfq_cpd_init,
001bea73 4657 .pd_alloc_fn = cfq_pd_alloc,
f9fcc2d3 4658 .pd_init_fn = cfq_pd_init,
0b39920b 4659 .pd_offline_fn = cfq_pd_offline,
001bea73 4660 .pd_free_fn = cfq_pd_free,
f9fcc2d3 4661 .pd_reset_stats_fn = cfq_pd_reset_stats,
3e252066 4662};
3e252066
VG
4663#endif
4664
1da177e4
LT
4665static int __init cfq_init(void)
4666{
3d3c2379
TH
4667 int ret;
4668
22e2c507
JA
4669 /*
4670 * could be 0 on HZ < 1000 setups
4671 */
4672 if (!cfq_slice_async)
4673 cfq_slice_async = 1;
4674 if (!cfq_slice_idle)
4675 cfq_slice_idle = 1;
4676
80bdf0c7
VG
4677#ifdef CONFIG_CFQ_GROUP_IOSCHED
4678 if (!cfq_group_idle)
4679 cfq_group_idle = 1;
8bd435b3 4680
3c798398 4681 ret = blkcg_policy_register(&blkcg_policy_cfq);
8bd435b3
TH
4682 if (ret)
4683 return ret;
ffea73fc
TH
4684#else
4685 cfq_group_idle = 0;
4686#endif
8bd435b3 4687
fd794956 4688 ret = -ENOMEM;
3d3c2379
TH
4689 cfq_pool = KMEM_CACHE(cfq_queue, 0);
4690 if (!cfq_pool)
8bd435b3 4691 goto err_pol_unreg;
1da177e4 4692
3d3c2379 4693 ret = elv_register(&iosched_cfq);
8bd435b3
TH
4694 if (ret)
4695 goto err_free_pool;
3d3c2379 4696
2fdd82bd 4697 return 0;
8bd435b3
TH
4698
4699err_free_pool:
4700 kmem_cache_destroy(cfq_pool);
4701err_pol_unreg:
ffea73fc 4702#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4703 blkcg_policy_unregister(&blkcg_policy_cfq);
ffea73fc 4704#endif
8bd435b3 4705 return ret;
1da177e4
LT
4706}
4707
4708static void __exit cfq_exit(void)
4709{
ffea73fc 4710#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4711 blkcg_policy_unregister(&blkcg_policy_cfq);
ffea73fc 4712#endif
1da177e4 4713 elv_unregister(&iosched_cfq);
3d3c2379 4714 kmem_cache_destroy(cfq_pool);
1da177e4
LT
4715}
4716
4717module_init(cfq_init);
4718module_exit(cfq_exit);
4719
4720MODULE_AUTHOR("Jens Axboe");
4721MODULE_LICENSE("GPL");
4722MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");