cfq-iosched: Fix wrong children_weight calculation
[linux-2.6-block.git] / block / cfq-iosched.c

/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
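
/*
 * For orientation (illustrative arithmetic, assuming HZ=1000): the
 * jiffies-based tunables above come out as 250ms/125ms fifo expiry for
 * sync/async, a 100ms sync slice, a 40ms async slice, 8ms of queue and
 * group idling, and a 300ms target latency.  Because each value is a
 * fraction of HZ, the wall-clock times are roughly the same at other
 * HZ settings such as 250.
 */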

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
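
/*
 * Worked example for CFQQ_SEEKY() (illustrative): seek_history is a
 * 32-bit sliding window, one bit per recent request, with a bit set
 * when that request landed further than CFQQ_SEEK_THR from the previous
 * one (the window is shifted in elsewhere in the scheduler).
 * hweight32() counts the set bits, so a queue turns seeky once more
 * than 32/8 = 4 of its last 32 requests were seeks, e.g.
 * hweight32(0x0000001f) = 5 > 4.
 */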

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, so it has negative index
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* total bytes transferred */
	struct blkg_rwstat		service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat		serviced;
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total sectors transferred */
	struct blkg_stat		sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t			start_group_wait_time;
	uint64_t			start_idle_time;
	uint64_t			start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg. This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active. An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to. This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1. The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs. leaf_weight is the weight of
	 * this cfqg against the child cfqgs. For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */
	struct cfqg_stats dead_stats;	/* stats pushed from dead children */
};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_id;	/* the current blkcg ID */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position. These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;
	unsigned int cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}
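
/*
 * Usage sketch for st_for() (illustrative): a busy best-effort queue
 * that is sync but non-idling maps to
 *
 *	st_for(cfqg, BE_WORKLOAD, SYNC_NOIDLE_WORKLOAD)
 *		== &cfqg->service_trees[0][1]
 *
 * while any idle-class queue maps to &cfqg->service_tree_idle
 * regardless of the type argument.
 */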

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
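
/*
 * For reference, CFQ_CFQQ_FNS(on_rr) above expands to (illustrative):
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */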

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
	stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf));	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			  __pbuf, ##args);				\
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do {			\
	char __pbuf[128];						\
									\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf));		\
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args);	\
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
{
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
	blkg_rwstat_merge(&to->serviced, &from->serviced);
	blkg_rwstat_merge(&to->merged, &from->merged);
	blkg_rwstat_merge(&to->service_time, &from->service_time);
	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
	blkg_stat_merge(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
	blkg_stat_merge(&to->dequeue, &from->dequeue);
	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_merge(&to->idle_time, &from->idle_time);
	blkg_stat_merge(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
	cfqg_stats_reset(&cfqg->stats);
	cfqg_stats_reset(&cfqg->dead_stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid,	\
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
				##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j]: NULL) \
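
/*
 * Iteration-order note (illustrative): for_each_cfqg_st() visits
 * service_trees[BE_WORKLOAD][ASYNC..SYNC], then
 * service_trees[RT_WORKLOAD][ASYNC..SYNC], and finally the single
 * service_tree_idle, i.e. 3 + 3 + 1 = 7 trees per group.
 */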

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most of the cases until and unless we drive shallower queue
	 * depths and that becomes a performance bottleneck. In such cases
	 * switch to start providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}

static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)
{
	if (wl_class == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
		cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					 struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
		cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio,
				       gfp_t gfp_mask);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(&cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
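
/*
 * Worked example (illustrative, assuming HZ=1000 so the sync base slice
 * is 100ms): with CFQ_SLICE_SCALE = 5, a sync queue gets
 *
 *	slice = 100 + 100/5 * (4 - prio)
 *
 * i.e. 180ms at prio 0, 100ms at the default prio 4, and 40ms at prio 7.
 */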

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1]. The
 * scaling is inversely proportional.
 *
 *	scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(unsigned long charge,
				    unsigned int vfraction)
{
	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */

	/* charge / vfraction */
	c <<= CFQ_SERVICE_SHIFT;
	do_div(c, vfraction);
	return c;
}
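
/*
 * Worked example (illustrative): a cfqg owning half of the device has
 * vfraction = (1 << CFQ_SERVICE_SHIFT) / 2 = 1 << 11.  Charging it
 * 10 units of disk time yields
 *
 *	c = ((10 << 12) << 12) / (1 << 11) = 20 << 12
 *
 * i.e. its vdisktime advances by 20 units in fixed point - twice the
 * wall-clock charge, as expected for a group entitled to half the disk.
 */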

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
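
/*
 * Note (illustrative): both helpers compare through a signed delta so
 * the result stays correct across u64 wraparound.  E.g. with
 * min_vdisktime = ULLONG_MAX and vdisktime = 1, the unsigned difference
 * is 2, (s64)2 > 0, and max_vdisktime() correctly treats the wrapped
 * vdisktime as the later value.
 */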

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher numbers,
 * to quickly follow sudden increases and decrease slowly
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
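
/*
 * Worked example (illustrative, default cfq_back_penalty = 2): with the
 * head at sector 1000, a request at sector 1100 gets d1 = 100, while a
 * request at sector 900 (within back_max) gets d2 = (1000 - 900) * 2 =
 * 200.  The forward request wins even though both are equally far from
 * the head, reflecting the cheaper forward seek.
 */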

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	if (cfqg->new_weight) {
		cfqg->weight = cfqg->new_weight;
		cfqg->new_weight = 0;
	}
}

static void
cfq_update_group_leaf_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	if (cfqg->new_leaf_weight) {
		cfqg->leaf_weight = cfqg->new_leaf_weight;
		cfqg->new_leaf_weight = 0;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* start with 1 */
	struct cfq_group *pos = cfqg;
	struct cfq_group *parent;
	bool propagate;

	/* add to the service tree */
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_leaf_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);

	/*
	 * Activate @cfqg and calculate the portion of vfraction @cfqg is
	 * entitled to. vfraction is calculated by walking the tree
	 * towards the root calculating the fraction it has at each level.
	 * The compounded ratio is how much vfraction @cfqg owns.
	 *
	 * Start with the proportion tasks in this cfqg has against active
	 * children cfqgs - its leaf_weight against children_weight.
	 */
	propagate = !pos->nr_active++;
	pos->children_weight += pos->leaf_weight;
	vfr = vfr * pos->leaf_weight / pos->children_weight;

	/*
	 * Compound ->weight walking up the tree. Both activation and
	 * vfraction calculation are done in the same loop. Propagation
	 * stops once an already activated node is met. vfraction
	 * calculation should always continue to the root.
	 */
	while ((parent = cfqg_parent(pos))) {
		if (propagate) {
			cfq_update_group_weight(pos);
			propagate = !parent->nr_active++;
			parent->children_weight += pos->weight;
		}
		vfr = vfr * pos->weight / parent->children_weight;
		pos = parent;
	}

	cfqg->vfraction = max_t(unsigned, vfr, 1);
}
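
/*
 * Worked example for the compounding above (illustrative): if this
 * cfqg's leaf_weight is half of its own children_weight, the first step
 * leaves vfr at 1/2; if its weight is then half of its parent's
 * children_weight and the parent is the root, the loop compounds this
 * to vfr = 1/2 * 1/2 = 1/4 of the device, i.e.
 * vfraction = (1 << CFQ_SERVICE_SHIFT) / 4.
 */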

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose everything if it was not continuously
	 * backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct cfq_group *pos = cfqg;
	bool propagate;

	/*
	 * Undo activation from cfq_group_service_tree_add(). Deactivate
	 * @cfqg and propagate deactivation upwards.
	 */
	propagate = !--pos->nr_active;
	pos->children_weight -= pos->leaf_weight;

	while (propagate) {
		struct cfq_group *parent = cfqg_parent(pos);

		/* @pos has 0 nr_active at this point */
		WARN_ON_ONCE(pos->children_weight);
		pos->vfraction = 0;

		if (!parent)
			break;

		propagate = !--parent->nr_active;
		parent->children_weight -= pos->weight;
		pos = parent;
	}

	/* remove from the service tree */
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_wl_slice = 0;
	cfqg_stats_update_dequeue(cfqg);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;
	unsigned int vfr;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/*
	 * Can't update vdisktime while on service tree and cfqg->vfraction
	 * is valid only while on it. Cache vfr, leave the service tree,
	 * update vdisktime and go back on. The re-addition to the tree
	 * will also update the weights as necessary.
	 */
	vfr = cfqg->vfraction;
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_wl_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_wl_type = cfqd->serving_wl_type;
		cfqg->saved_wl_class = cfqd->serving_wl_class;
	} else
		cfqg->saved_wl_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
			st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
	cfqg_stats_set_start_empty_time(cfqg);
}
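
/*
 * Charge selection above, summarized (illustrative): in iops_mode() the
 * group is charged the number of requests dispatched in the slice; an
 * async queue in a group with no busy sync queues is charged its whole
 * allocated slice; otherwise the measured wall-clock slice usage is
 * charged, scaled by the group's vfraction.
 */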

/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = jiffies;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void cfqg_stats_init(struct cfqg_stats *stats)
{
	blkg_rwstat_init(&stats->service_bytes);
	blkg_rwstat_init(&stats->serviced);
	blkg_rwstat_init(&stats->merged);
	blkg_rwstat_init(&stats->service_time);
	blkg_rwstat_init(&stats->wait_time);
	blkg_rwstat_init(&stats->queued);

	blkg_stat_init(&stats->sectors);
	blkg_stat_init(&stats->time);

#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_init(&stats->unaccounted_time);
	blkg_stat_init(&stats->avg_queue_size_sum);
	blkg_stat_init(&stats->avg_queue_size_samples);
	blkg_stat_init(&stats->dequeue);
	blkg_stat_init(&stats->group_wait_time);
	blkg_stat_init(&stats->idle_time);
	blkg_stat_init(&stats->empty_time);
#endif
}

static void cfq_pd_init(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfq_init_cfqg_base(cfqg);
	cfqg->weight = blkg->blkcg->cfq_weight;
	cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
	cfqg_stats_init(&cfqg->stats);
	cfqg_stats_init(&cfqg->dead_stats);
}

static void cfq_pd_offline(struct blkcg_gq *blkg)
{
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
}

/* offset delta from cfqg->stats to cfqg->dead_stats */
static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
					offsetof(struct cfq_group, stats);

/* to be used by recursive prfill, sums live and dead stats recursively */
static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
{
	u64 sum = 0;

	sum += blkg_stat_recursive_sum(pd, off);
	sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
	return sum;
}

/* to be used by recursive prfill, sums live and dead rwstats recursively */
static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
						       int off)
{
	struct blkg_rwstat a, b;

	a = blkg_rwstat_recursive_sum(pd, off);
	b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
	blkg_rwstat_merge(&a, &b);
	return a;
}

static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfqg_stats_reset(&cfqg->stats);
	cfqg_stats_reset(&cfqg->dead_stats);
}
1595/*
3e59cf9d
VG
1596 * Search for the cfq group current task belongs to. request_queue lock must
1597 * be held.
25fb5169 1598 */
cd1604fa 1599static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
3c798398 1600 struct blkcg *blkcg)
25fb5169 1601{
f469a7b4 1602 struct request_queue *q = cfqd->queue;
cd1604fa 1603 struct cfq_group *cfqg = NULL;
25fb5169 1604
3c798398
TH
1605 /* avoid lookup for the common case where there's no blkcg */
1606 if (blkcg == &blkcg_root) {
cd1604fa
TH
1607 cfqg = cfqd->root_group;
1608 } else {
3c798398 1609 struct blkcg_gq *blkg;
f469a7b4 1610
3c96cb32 1611 blkg = blkg_lookup_create(blkcg, q);
cd1604fa 1612 if (!IS_ERR(blkg))
0381411e 1613 cfqg = blkg_to_cfqg(blkg);
cd1604fa 1614 }
f469a7b4 1615
25fb5169
VG
1616 return cfqg;
1617}
1618
1619static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1620{
 1621 /* Currently, all async queues are mapped to the root group */
1622 if (!cfq_cfqq_sync(cfqq))
f51b802c 1623 cfqg = cfqq->cfqd->root_group;
25fb5169
VG
1624
1625 cfqq->cfqg = cfqg;
b1c35769 1626 /* cfqq reference on cfqg */
eb7d8c07 1627 cfqg_get(cfqg);
b1c35769
VG
1628}
1629
f95a04af
TH
1630static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1631 struct blkg_policy_data *pd, int off)
60c2bc2d 1632{
f95a04af 1633 struct cfq_group *cfqg = pd_to_cfqg(pd);
3381cb8d
TH
1634
1635 if (!cfqg->dev_weight)
60c2bc2d 1636 return 0;
f95a04af 1637 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
60c2bc2d
TH
1638}
1639
2da8ca82 1640static int cfqg_print_weight_device(struct seq_file *sf, void *v)
60c2bc2d 1641{
2da8ca82
TH
1642 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1643 cfqg_prfill_weight_device, &blkcg_policy_cfq,
1644 0, false);
60c2bc2d
TH
1645 return 0;
1646}
1647
e71357e1
TH
1648static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1649 struct blkg_policy_data *pd, int off)
1650{
1651 struct cfq_group *cfqg = pd_to_cfqg(pd);
1652
1653 if (!cfqg->dev_leaf_weight)
1654 return 0;
1655 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1656}
1657
2da8ca82 1658static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
e71357e1 1659{
2da8ca82
TH
1660 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1661 cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1662 0, false);
e71357e1
TH
1663 return 0;
1664}
1665
2da8ca82 1666static int cfq_print_weight(struct seq_file *sf, void *v)
60c2bc2d 1667{
2da8ca82 1668 seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_weight);
60c2bc2d
TH
1669 return 0;
1670}
1671
2da8ca82 1672static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
e71357e1 1673{
2da8ca82 1674 seq_printf(sf, "%u\n", css_to_blkcg(seq_css(sf))->cfq_leaf_weight);
e71357e1
TH
1675 return 0;
1676}
1677
451af504
TH
1678static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1679 char *buf, size_t nbytes, loff_t off,
1680 bool is_leaf_weight)
60c2bc2d 1681{
451af504 1682 struct blkcg *blkcg = css_to_blkcg(of_css(of));
60c2bc2d 1683 struct blkg_conf_ctx ctx;
3381cb8d 1684 struct cfq_group *cfqg;
60c2bc2d
TH
1685 int ret;
1686
3c798398 1687 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
60c2bc2d
TH
1688 if (ret)
1689 return ret;
1690
1691 ret = -EINVAL;
3381cb8d 1692 cfqg = blkg_to_cfqg(ctx.blkg);
a2b1693b 1693 if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
e71357e1
TH
1694 if (!is_leaf_weight) {
1695 cfqg->dev_weight = ctx.v;
1696 cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
1697 } else {
1698 cfqg->dev_leaf_weight = ctx.v;
1699 cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
1700 }
60c2bc2d
TH
1701 ret = 0;
1702 }
1703
1704 blkg_conf_finish(&ctx);
451af504 1705 return ret ?: nbytes;
60c2bc2d
TH
1706}
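/*
 * Usage sketch (an assumption, not taken from this file): the handler
 * above parses a "MAJ:MIN value" line written to the cgroup file, e.g.:
 *
 *   echo "8:16 500" > /sys/fs/cgroup/blkio/blkio.weight_device
 *
 * A value of 0 clears the per-device override, falling back to
 * blkcg->cfq_weight via the "ctx.v ?: blkcg->cfq_weight" above.
 */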
1707
451af504
TH
1708static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1709 char *buf, size_t nbytes, loff_t off)
e71357e1 1710{
451af504 1711 return __cfqg_set_weight_device(of, buf, nbytes, off, false);
e71357e1
TH
1712}
1713
451af504
TH
1714static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1715 char *buf, size_t nbytes, loff_t off)
e71357e1 1716{
451af504 1717 return __cfqg_set_weight_device(of, buf, nbytes, off, true);
e71357e1
TH
1718}
1719
182446d0
TH
1720static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1721 u64 val, bool is_leaf_weight)
60c2bc2d 1722{
182446d0 1723 struct blkcg *blkcg = css_to_blkcg(css);
3c798398 1724 struct blkcg_gq *blkg;
60c2bc2d 1725
3381cb8d 1726 if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
60c2bc2d
TH
1727 return -EINVAL;
1728
1729 spin_lock_irq(&blkcg->lock);
e71357e1
TH
1730
1731 if (!is_leaf_weight)
1732 blkcg->cfq_weight = val;
1733 else
1734 blkcg->cfq_leaf_weight = val;
60c2bc2d 1735
b67bfe0d 1736 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3381cb8d 1737 struct cfq_group *cfqg = blkg_to_cfqg(blkg);
60c2bc2d 1738
e71357e1
TH
1739 if (!cfqg)
1740 continue;
1741
1742 if (!is_leaf_weight) {
1743 if (!cfqg->dev_weight)
1744 cfqg->new_weight = blkcg->cfq_weight;
1745 } else {
1746 if (!cfqg->dev_leaf_weight)
1747 cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
1748 }
60c2bc2d
TH
1749 }
1750
1751 spin_unlock_irq(&blkcg->lock);
1752 return 0;
1753}
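/*
 * Usage sketch (an assumption): __cfq_set_weight() backs the plain
 * weight files, so e.g.:
 *
 *   echo 500 > /sys/fs/cgroup/blkio/blkio.weight
 *
 * updates blkcg->cfq_weight and propagates it to every child cfqg that
 * has no per-device override (!cfqg->dev_weight).
 */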
1754
182446d0
TH
1755static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
1756 u64 val)
e71357e1 1757{
182446d0 1758 return __cfq_set_weight(css, cft, val, false);
e71357e1
TH
1759}
1760
182446d0
TH
1761static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
1762 struct cftype *cft, u64 val)
e71357e1 1763{
182446d0 1764 return __cfq_set_weight(css, cft, val, true);
e71357e1
TH
1765}
1766
2da8ca82 1767static int cfqg_print_stat(struct seq_file *sf, void *v)
5bc4afb1 1768{
2da8ca82
TH
1769 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1770 &blkcg_policy_cfq, seq_cft(sf)->private, false);
5bc4afb1
TH
1771 return 0;
1772}
1773
2da8ca82 1774static int cfqg_print_rwstat(struct seq_file *sf, void *v)
5bc4afb1 1775{
2da8ca82
TH
1776 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1777 &blkcg_policy_cfq, seq_cft(sf)->private, true);
5bc4afb1
TH
1778 return 0;
1779}
1780
43114018
TH
1781static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1782 struct blkg_policy_data *pd, int off)
1783{
1784 u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
1785
1786 return __blkg_prfill_u64(sf, pd, sum);
1787}
1788
1789static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1790 struct blkg_policy_data *pd, int off)
1791{
1792 struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
1793
1794 return __blkg_prfill_rwstat(sf, pd, &sum);
1795}
1796
2da8ca82 1797static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
43114018 1798{
2da8ca82
TH
1799 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1800 cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1801 seq_cft(sf)->private, false);
43114018
TH
1802 return 0;
1803}
1804
2da8ca82 1805static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
43114018 1806{
2da8ca82
TH
1807 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1808 cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1809 seq_cft(sf)->private, true);
43114018
TH
1810 return 0;
1811}
1812
60c2bc2d 1813#ifdef CONFIG_DEBUG_BLK_CGROUP
f95a04af
TH
1814static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1815 struct blkg_policy_data *pd, int off)
60c2bc2d 1816{
f95a04af 1817 struct cfq_group *cfqg = pd_to_cfqg(pd);
155fead9 1818 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
60c2bc2d
TH
1819 u64 v = 0;
1820
1821 if (samples) {
155fead9 1822 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
f3cff25f 1823 v = div64_u64(v, samples);
60c2bc2d 1824 }
f95a04af 1825 __blkg_prfill_u64(sf, pd, v);
60c2bc2d
TH
1826 return 0;
1827}
1828
1829/* print avg_queue_size */
2da8ca82 1830static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
60c2bc2d 1831{
2da8ca82
TH
1832 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1833 cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
1834 0, false);
60c2bc2d
TH
1835 return 0;
1836}
1837#endif /* CONFIG_DEBUG_BLK_CGROUP */
1838
1839static struct cftype cfq_blkcg_files[] = {
1d3650f7 1840 /* on root, weight is mapped to leaf_weight */
60c2bc2d
TH
1841 {
1842 .name = "weight_device",
1d3650f7 1843 .flags = CFTYPE_ONLY_ON_ROOT,
2da8ca82 1844 .seq_show = cfqg_print_leaf_weight_device,
451af504 1845 .write = cfqg_set_leaf_weight_device,
60c2bc2d
TH
1846 },
1847 {
1848 .name = "weight",
1d3650f7 1849 .flags = CFTYPE_ONLY_ON_ROOT,
2da8ca82 1850 .seq_show = cfq_print_leaf_weight,
1d3650f7 1851 .write_u64 = cfq_set_leaf_weight,
60c2bc2d 1852 },
e71357e1 1853
1d3650f7 1854 /* no such mapping necessary for !roots */
60c2bc2d
TH
1855 {
1856 .name = "weight_device",
1d3650f7 1857 .flags = CFTYPE_NOT_ON_ROOT,
2da8ca82 1858 .seq_show = cfqg_print_weight_device,
451af504 1859 .write = cfqg_set_weight_device,
60c2bc2d
TH
1860 },
1861 {
1862 .name = "weight",
1d3650f7 1863 .flags = CFTYPE_NOT_ON_ROOT,
2da8ca82 1864 .seq_show = cfq_print_weight,
3381cb8d 1865 .write_u64 = cfq_set_weight,
60c2bc2d 1866 },
e71357e1 1867
e71357e1
TH
1868 {
1869 .name = "leaf_weight_device",
2da8ca82 1870 .seq_show = cfqg_print_leaf_weight_device,
451af504 1871 .write = cfqg_set_leaf_weight_device,
e71357e1
TH
1872 },
1873 {
1874 .name = "leaf_weight",
2da8ca82 1875 .seq_show = cfq_print_leaf_weight,
e71357e1
TH
1876 .write_u64 = cfq_set_leaf_weight,
1877 },
1878
43114018 1879 /* statistics covering only the tasks in the cfqg */
60c2bc2d
TH
1880 {
1881 .name = "time",
5bc4afb1 1882 .private = offsetof(struct cfq_group, stats.time),
2da8ca82 1883 .seq_show = cfqg_print_stat,
60c2bc2d
TH
1884 },
1885 {
1886 .name = "sectors",
5bc4afb1 1887 .private = offsetof(struct cfq_group, stats.sectors),
2da8ca82 1888 .seq_show = cfqg_print_stat,
60c2bc2d
TH
1889 },
1890 {
1891 .name = "io_service_bytes",
5bc4afb1 1892 .private = offsetof(struct cfq_group, stats.service_bytes),
2da8ca82 1893 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
1894 },
1895 {
1896 .name = "io_serviced",
5bc4afb1 1897 .private = offsetof(struct cfq_group, stats.serviced),
2da8ca82 1898 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
1899 },
1900 {
1901 .name = "io_service_time",
5bc4afb1 1902 .private = offsetof(struct cfq_group, stats.service_time),
2da8ca82 1903 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
1904 },
1905 {
1906 .name = "io_wait_time",
5bc4afb1 1907 .private = offsetof(struct cfq_group, stats.wait_time),
2da8ca82 1908 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
1909 },
1910 {
1911 .name = "io_merged",
5bc4afb1 1912 .private = offsetof(struct cfq_group, stats.merged),
2da8ca82 1913 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
1914 },
1915 {
1916 .name = "io_queued",
5bc4afb1 1917 .private = offsetof(struct cfq_group, stats.queued),
2da8ca82 1918 .seq_show = cfqg_print_rwstat,
60c2bc2d 1919 },
43114018
TH
1920
 1921 /* the same statistics, covering the cfqg and its descendants */
1922 {
1923 .name = "time_recursive",
1924 .private = offsetof(struct cfq_group, stats.time),
2da8ca82 1925 .seq_show = cfqg_print_stat_recursive,
43114018
TH
1926 },
1927 {
1928 .name = "sectors_recursive",
1929 .private = offsetof(struct cfq_group, stats.sectors),
2da8ca82 1930 .seq_show = cfqg_print_stat_recursive,
43114018
TH
1931 },
1932 {
1933 .name = "io_service_bytes_recursive",
1934 .private = offsetof(struct cfq_group, stats.service_bytes),
2da8ca82 1935 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
1936 },
1937 {
1938 .name = "io_serviced_recursive",
1939 .private = offsetof(struct cfq_group, stats.serviced),
2da8ca82 1940 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
1941 },
1942 {
1943 .name = "io_service_time_recursive",
1944 .private = offsetof(struct cfq_group, stats.service_time),
2da8ca82 1945 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
1946 },
1947 {
1948 .name = "io_wait_time_recursive",
1949 .private = offsetof(struct cfq_group, stats.wait_time),
2da8ca82 1950 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
1951 },
1952 {
1953 .name = "io_merged_recursive",
1954 .private = offsetof(struct cfq_group, stats.merged),
2da8ca82 1955 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
1956 },
1957 {
1958 .name = "io_queued_recursive",
1959 .private = offsetof(struct cfq_group, stats.queued),
2da8ca82 1960 .seq_show = cfqg_print_rwstat_recursive,
43114018 1961 },
60c2bc2d
TH
1962#ifdef CONFIG_DEBUG_BLK_CGROUP
1963 {
1964 .name = "avg_queue_size",
2da8ca82 1965 .seq_show = cfqg_print_avg_queue_size,
60c2bc2d
TH
1966 },
1967 {
1968 .name = "group_wait_time",
5bc4afb1 1969 .private = offsetof(struct cfq_group, stats.group_wait_time),
2da8ca82 1970 .seq_show = cfqg_print_stat,
60c2bc2d
TH
1971 },
1972 {
1973 .name = "idle_time",
5bc4afb1 1974 .private = offsetof(struct cfq_group, stats.idle_time),
2da8ca82 1975 .seq_show = cfqg_print_stat,
60c2bc2d
TH
1976 },
1977 {
1978 .name = "empty_time",
5bc4afb1 1979 .private = offsetof(struct cfq_group, stats.empty_time),
2da8ca82 1980 .seq_show = cfqg_print_stat,
60c2bc2d
TH
1981 },
1982 {
1983 .name = "dequeue",
5bc4afb1 1984 .private = offsetof(struct cfq_group, stats.dequeue),
2da8ca82 1985 .seq_show = cfqg_print_stat,
60c2bc2d
TH
1986 },
1987 {
1988 .name = "unaccounted_time",
5bc4afb1 1989 .private = offsetof(struct cfq_group, stats.unaccounted_time),
2da8ca82 1990 .seq_show = cfqg_print_stat,
60c2bc2d
TH
1991 },
1992#endif /* CONFIG_DEBUG_BLK_CGROUP */
1993 { } /* terminate */
1994};
25fb5169 1995#else /* GROUP_IOSCHED */
cd1604fa 1996static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
3c798398 1997 struct blkcg *blkcg)
25fb5169 1998{
f51b802c 1999 return cfqd->root_group;
25fb5169 2000}
7f1dc8a2 2001
25fb5169
VG
2002static inline void
2003cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
2004 cfqq->cfqg = cfqg;
2005}
2006
2007#endif /* GROUP_IOSCHED */
2008
498d3aa2 2009/*
c0324a02 2010 * The cfqd->service_trees holds all pending cfq_queue's that have
498d3aa2
JA
2011 * requests waiting to be processed. It is sorted in the order that
2012 * we will service the queues.
2013 */
a36e71f9 2014static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 2015 bool add_front)
d9e7620e 2016{
0871714e
JA
2017 struct rb_node **p, *parent;
2018 struct cfq_queue *__cfqq;
d9e7620e 2019 unsigned long rb_key;
34b98d03 2020 struct cfq_rb_root *st;
498d3aa2 2021 int left;
dae739eb 2022 int new_cfqq = 1;
ae30c286 2023
34b98d03 2024 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
0871714e
JA
2025 if (cfq_class_idle(cfqq)) {
2026 rb_key = CFQ_IDLE_DELAY;
34b98d03 2027 parent = rb_last(&st->rb);
0871714e
JA
2028 if (parent && parent != &cfqq->rb_node) {
2029 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2030 rb_key += __cfqq->rb_key;
2031 } else
2032 rb_key += jiffies;
2033 } else if (!add_front) {
b9c8946b
JA
2034 /*
2035 * Get our rb key offset. Subtract any residual slice
2036 * value carried from last service. A negative resid
2037 * count indicates slice overrun, and this should position
2038 * the next service time further away in the tree.
2039 */
edd75ffd 2040 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
b9c8946b 2041 rb_key -= cfqq->slice_resid;
edd75ffd 2042 cfqq->slice_resid = 0;
48e025e6
CZ
2043 } else {
2044 rb_key = -HZ;
34b98d03 2045 __cfqq = cfq_rb_first(st);
48e025e6
CZ
2046 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
2047 }
1da177e4 2048
d9e7620e 2049 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
dae739eb 2050 new_cfqq = 0;
99f9628a 2051 /*
d9e7620e 2052 * same position, nothing more to do
99f9628a 2053 */
34b98d03 2054 if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
d9e7620e 2055 return;
1da177e4 2056
aa6f6a3d
CZ
2057 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2058 cfqq->service_tree = NULL;
1da177e4 2059 }
d9e7620e 2060
498d3aa2 2061 left = 1;
0871714e 2062 parent = NULL;
34b98d03
VG
2063 cfqq->service_tree = st;
2064 p = &st->rb.rb_node;
d9e7620e
JA
2065 while (*p) {
2066 parent = *p;
2067 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
2068
0c534e0a 2069 /*
c0324a02 2070 * sort by key, that represents service time.
0c534e0a 2071 */
c0324a02 2072 if (time_before(rb_key, __cfqq->rb_key))
1f23f121 2073 p = &parent->rb_left;
c0324a02 2074 else {
1f23f121 2075 p = &parent->rb_right;
cc09e299 2076 left = 0;
c0324a02 2077 }
d9e7620e
JA
2078 }
2079
cc09e299 2080 if (left)
34b98d03 2081 st->left = &cfqq->rb_node;
cc09e299 2082
d9e7620e
JA
2083 cfqq->rb_key = rb_key;
2084 rb_link_node(&cfqq->rb_node, parent, p);
34b98d03
VG
2085 rb_insert_color(&cfqq->rb_node, &st->rb);
2086 st->count++;
20359f27 2087 if (add_front || !new_cfqq)
dae739eb 2088 return;
8184f93e 2089 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1da177e4
LT
2090}
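/*
 * Worked example (illustrative numbers only): for a queue added to the
 * back, rb_key = cfq_slice_offset() + jiffies - slice_resid. A queue
 * that overran its previous slice carries a negative slice_resid, which
 * pushes its key, and hence its next service, further to the right:
 */
#if 0	/* example only */
static unsigned long example_rb_key(unsigned long now,
				    unsigned long slice_offset,
				    long slice_resid)
{
	/* resid > 0: leftover slice, serve earlier; resid < 0: overrun */
	return slice_offset + now - slice_resid;
}
/* example_rb_key(1000, 40, -25) == 1065, later than the on-time 1040 */
#endif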
2091
a36e71f9 2092static struct cfq_queue *
f2d1f0ae
JA
2093cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
2094 sector_t sector, struct rb_node **ret_parent,
2095 struct rb_node ***rb_link)
a36e71f9 2096{
a36e71f9
JA
2097 struct rb_node **p, *parent;
2098 struct cfq_queue *cfqq = NULL;
2099
2100 parent = NULL;
2101 p = &root->rb_node;
2102 while (*p) {
2103 struct rb_node **n;
2104
2105 parent = *p;
2106 cfqq = rb_entry(parent, struct cfq_queue, p_node);
2107
2108 /*
2109 * Sort strictly based on sector. Smallest to the left,
2110 * largest to the right.
2111 */
2e46e8b2 2112 if (sector > blk_rq_pos(cfqq->next_rq))
a36e71f9 2113 n = &(*p)->rb_right;
2e46e8b2 2114 else if (sector < blk_rq_pos(cfqq->next_rq))
a36e71f9
JA
2115 n = &(*p)->rb_left;
2116 else
2117 break;
2118 p = n;
3ac6c9f8 2119 cfqq = NULL;
a36e71f9
JA
2120 }
2121
2122 *ret_parent = parent;
2123 if (rb_link)
2124 *rb_link = p;
3ac6c9f8 2125 return cfqq;
a36e71f9
JA
2126}
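/*
 * Note (illustrative): the prio tree keys each queue by the sector of
 * its next request, so the descent above is a plain ordered search --
 * larger target sectors go right, smaller go left, and a failed lookup
 * leaves *ret_parent pointing at the node closest to the target sector.
 */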
2127
2128static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2129{
a36e71f9
JA
2130 struct rb_node **p, *parent;
2131 struct cfq_queue *__cfqq;
2132
f2d1f0ae
JA
2133 if (cfqq->p_root) {
2134 rb_erase(&cfqq->p_node, cfqq->p_root);
2135 cfqq->p_root = NULL;
2136 }
a36e71f9
JA
2137
2138 if (cfq_class_idle(cfqq))
2139 return;
2140 if (!cfqq->next_rq)
2141 return;
2142
f2d1f0ae 2143 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
2e46e8b2
TH
2144 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
2145 blk_rq_pos(cfqq->next_rq), &parent, &p);
3ac6c9f8
JA
2146 if (!__cfqq) {
2147 rb_link_node(&cfqq->p_node, parent, p);
f2d1f0ae
JA
2148 rb_insert_color(&cfqq->p_node, cfqq->p_root);
2149 } else
2150 cfqq->p_root = NULL;
a36e71f9
JA
2151}
2152
498d3aa2
JA
2153/*
2154 * Update cfqq's position in the service tree.
2155 */
edd75ffd 2156static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
6d048f53 2157{
6d048f53
JA
2158 /*
2159 * Resorting requires the cfqq to be on the RR list already.
2160 */
a36e71f9 2161 if (cfq_cfqq_on_rr(cfqq)) {
edd75ffd 2162 cfq_service_tree_add(cfqd, cfqq, 0);
a36e71f9
JA
2163 cfq_prio_tree_add(cfqd, cfqq);
2164 }
6d048f53
JA
2165}
2166
1da177e4
LT
2167/*
2168 * add to busy list of queues for service, trying to be fair in ordering
22e2c507 2169 * the pending list according to last request service
1da177e4 2170 */
febffd61 2171static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 2172{
7b679138 2173 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
3b18152c
JA
2174 BUG_ON(cfq_cfqq_on_rr(cfqq));
2175 cfq_mark_cfqq_on_rr(cfqq);
1da177e4 2176 cfqd->busy_queues++;
ef8a41df
SL
2177 if (cfq_cfqq_sync(cfqq))
2178 cfqd->busy_sync_queues++;
1da177e4 2179
edd75ffd 2180 cfq_resort_rr_list(cfqd, cfqq);
1da177e4
LT
2181}
2182
498d3aa2
JA
2183/*
2184 * Called when the cfqq no longer has requests pending, remove it from
2185 * the service tree.
2186 */
febffd61 2187static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1da177e4 2188{
7b679138 2189 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
3b18152c
JA
2190 BUG_ON(!cfq_cfqq_on_rr(cfqq));
2191 cfq_clear_cfqq_on_rr(cfqq);
1da177e4 2192
aa6f6a3d
CZ
2193 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
2194 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
2195 cfqq->service_tree = NULL;
2196 }
f2d1f0ae
JA
2197 if (cfqq->p_root) {
2198 rb_erase(&cfqq->p_node, cfqq->p_root);
2199 cfqq->p_root = NULL;
2200 }
d9e7620e 2201
8184f93e 2202 cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1da177e4
LT
2203 BUG_ON(!cfqd->busy_queues);
2204 cfqd->busy_queues--;
ef8a41df
SL
2205 if (cfq_cfqq_sync(cfqq))
2206 cfqd->busy_sync_queues--;
1da177e4
LT
2207}
2208
2209/*
2210 * rb tree support functions
2211 */
febffd61 2212static void cfq_del_rq_rb(struct request *rq)
1da177e4 2213{
5e705374 2214 struct cfq_queue *cfqq = RQ_CFQQ(rq);
5e705374 2215 const int sync = rq_is_sync(rq);
1da177e4 2216
b4878f24
JA
2217 BUG_ON(!cfqq->queued[sync]);
2218 cfqq->queued[sync]--;
1da177e4 2219
5e705374 2220 elv_rb_del(&cfqq->sort_list, rq);
1da177e4 2221
f04a6424
VG
2222 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2223 /*
2224 * Queue will be deleted from service tree when we actually
2225 * expire it later. Right now just remove it from prio tree
2226 * as it is empty.
2227 */
2228 if (cfqq->p_root) {
2229 rb_erase(&cfqq->p_node, cfqq->p_root);
2230 cfqq->p_root = NULL;
2231 }
2232 }
1da177e4
LT
2233}
2234
5e705374 2235static void cfq_add_rq_rb(struct request *rq)
1da177e4 2236{
5e705374 2237 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 2238 struct cfq_data *cfqd = cfqq->cfqd;
796d5116 2239 struct request *prev;
1da177e4 2240
5380a101 2241 cfqq->queued[rq_is_sync(rq)]++;
1da177e4 2242
796d5116 2243 elv_rb_add(&cfqq->sort_list, rq);
5fccbf61
JA
2244
2245 if (!cfq_cfqq_on_rr(cfqq))
2246 cfq_add_cfqq_rr(cfqd, cfqq);
5044eed4
JA
2247
2248 /*
2249 * check if this request is a better next-serve candidate
2250 */
a36e71f9 2251 prev = cfqq->next_rq;
cf7c25cf 2252 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
a36e71f9
JA
2253
2254 /*
2255 * adjust priority tree position, if ->next_rq changes
2256 */
2257 if (prev != cfqq->next_rq)
2258 cfq_prio_tree_add(cfqd, cfqq);
2259
5044eed4 2260 BUG_ON(!cfqq->next_rq);
1da177e4
LT
2261}
2262
febffd61 2263static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1da177e4 2264{
5380a101
JA
2265 elv_rb_del(&cfqq->sort_list, rq);
2266 cfqq->queued[rq_is_sync(rq)]--;
155fead9 2267 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
5e705374 2268 cfq_add_rq_rb(rq);
155fead9
TH
2269 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
2270 rq->cmd_flags);
1da177e4
LT
2271}
2272
206dc69b
JA
2273static struct request *
2274cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1da177e4 2275{
206dc69b 2276 struct task_struct *tsk = current;
c5869807 2277 struct cfq_io_cq *cic;
206dc69b 2278 struct cfq_queue *cfqq;
1da177e4 2279
4ac845a2 2280 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
2281 if (!cic)
2282 return NULL;
2283
2284 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
f73a1c7d
KO
2285 if (cfqq)
2286 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
1da177e4 2287
1da177e4
LT
2288 return NULL;
2289}
2290
165125e1 2291static void cfq_activate_request(struct request_queue *q, struct request *rq)
1da177e4 2292{
22e2c507 2293 struct cfq_data *cfqd = q->elevator->elevator_data;
3b18152c 2294
53c583d2 2295 cfqd->rq_in_driver++;
7b679138 2296 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
53c583d2 2297 cfqd->rq_in_driver);
25776e35 2298
5b93629b 2299 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1da177e4
LT
2300}
2301
165125e1 2302static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1da177e4 2303{
b4878f24
JA
2304 struct cfq_data *cfqd = q->elevator->elevator_data;
2305
53c583d2
CZ
2306 WARN_ON(!cfqd->rq_in_driver);
2307 cfqd->rq_in_driver--;
7b679138 2308 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
53c583d2 2309 cfqd->rq_in_driver);
1da177e4
LT
2310}
2311
b4878f24 2312static void cfq_remove_request(struct request *rq)
1da177e4 2313{
5e705374 2314 struct cfq_queue *cfqq = RQ_CFQQ(rq);
21183b07 2315
5e705374
JA
2316 if (cfqq->next_rq == rq)
2317 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1da177e4 2318
b4878f24 2319 list_del_init(&rq->queuelist);
5e705374 2320 cfq_del_rq_rb(rq);
374f84ac 2321
45333d5a 2322 cfqq->cfqd->rq_queued--;
155fead9 2323 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
65299a3b
CH
2324 if (rq->cmd_flags & REQ_PRIO) {
2325 WARN_ON(!cfqq->prio_pending);
2326 cfqq->prio_pending--;
b53d1ed7 2327 }
1da177e4
LT
2328}
2329
165125e1
JA
2330static int cfq_merge(struct request_queue *q, struct request **req,
2331 struct bio *bio)
1da177e4
LT
2332{
2333 struct cfq_data *cfqd = q->elevator->elevator_data;
2334 struct request *__rq;
1da177e4 2335
206dc69b 2336 __rq = cfq_find_rq_fmerge(cfqd, bio);
22e2c507 2337 if (__rq && elv_rq_merge_ok(__rq, bio)) {
9817064b
JA
2338 *req = __rq;
2339 return ELEVATOR_FRONT_MERGE;
1da177e4
LT
2340 }
2341
2342 return ELEVATOR_NO_MERGE;
1da177e4
LT
2343}
2344
165125e1 2345static void cfq_merged_request(struct request_queue *q, struct request *req,
21183b07 2346 int type)
1da177e4 2347{
21183b07 2348 if (type == ELEVATOR_FRONT_MERGE) {
5e705374 2349 struct cfq_queue *cfqq = RQ_CFQQ(req);
1da177e4 2350
5e705374 2351 cfq_reposition_rq_rb(cfqq, req);
1da177e4 2352 }
1da177e4
LT
2353}
2354
812d4026
DS
2355static void cfq_bio_merged(struct request_queue *q, struct request *req,
2356 struct bio *bio)
2357{
155fead9 2358 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
812d4026
DS
2359}
2360
1da177e4 2361static void
165125e1 2362cfq_merged_requests(struct request_queue *q, struct request *rq,
1da177e4
LT
2363 struct request *next)
2364{
cf7c25cf 2365 struct cfq_queue *cfqq = RQ_CFQQ(rq);
4a0b75c7
SL
2366 struct cfq_data *cfqd = q->elevator->elevator_data;
2367
22e2c507
JA
2368 /*
2369 * reposition in fifo if next is older than rq
2370 */
2371 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
8b4922d3 2372 time_before(next->fifo_time, rq->fifo_time) &&
3d106fba 2373 cfqq == RQ_CFQQ(next)) {
22e2c507 2374 list_move(&rq->queuelist, &next->queuelist);
8b4922d3 2375 rq->fifo_time = next->fifo_time;
30996f40 2376 }
22e2c507 2377
cf7c25cf
CZ
2378 if (cfqq->next_rq == next)
2379 cfqq->next_rq = rq;
b4878f24 2380 cfq_remove_request(next);
155fead9 2381 cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
4a0b75c7
SL
2382
2383 cfqq = RQ_CFQQ(next);
2384 /*
 2385 * all requests of this queue were merged into other queues, so delete
 2386 * it from the service tree. If it's the active_queue,
 2387 * cfq_dispatch_requests() will choose to expire it or to idle
2388 */
2389 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2390 cfqq != cfqd->active_queue)
2391 cfq_del_cfqq_rr(cfqd, cfqq);
22e2c507
JA
2392}
2393
165125e1 2394static int cfq_allow_merge(struct request_queue *q, struct request *rq,
da775265
JA
2395 struct bio *bio)
2396{
2397 struct cfq_data *cfqd = q->elevator->elevator_data;
c5869807 2398 struct cfq_io_cq *cic;
da775265 2399 struct cfq_queue *cfqq;
da775265
JA
2400
2401 /*
ec8acb69 2402 * Disallow merge of a sync bio into an async request.
da775265 2403 */
91fac317 2404 if (cfq_bio_sync(bio) && !rq_is_sync(rq))
a6151c3a 2405 return false;
da775265
JA
2406
2407 /*
f1a4f4d3 2408 * Lookup the cfqq that this bio will be queued with and allow
07c2bd37 2409 * merge only if rq is queued there.
f1a4f4d3 2410 */
07c2bd37
TH
2411 cic = cfq_cic_lookup(cfqd, current->io_context);
2412 if (!cic)
2413 return false;
719d3402 2414
91fac317 2415 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
a6151c3a 2416 return cfqq == RQ_CFQQ(rq);
da775265
JA
2417}
2418
812df48d
DS
2419static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2420{
2421 del_timer(&cfqd->idle_slice_timer);
155fead9 2422 cfqg_stats_update_idle_time(cfqq->cfqg);
812df48d
DS
2423}
2424
febffd61
JA
2425static void __cfq_set_active_queue(struct cfq_data *cfqd,
2426 struct cfq_queue *cfqq)
22e2c507
JA
2427{
2428 if (cfqq) {
3bf10fea 2429 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
4d2ceea4 2430 cfqd->serving_wl_class, cfqd->serving_wl_type);
155fead9 2431 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
62a37f6b
JT
2432 cfqq->slice_start = 0;
2433 cfqq->dispatch_start = jiffies;
2434 cfqq->allocated_slice = 0;
2435 cfqq->slice_end = 0;
2436 cfqq->slice_dispatch = 0;
2437 cfqq->nr_sectors = 0;
2438
2439 cfq_clear_cfqq_wait_request(cfqq);
2440 cfq_clear_cfqq_must_dispatch(cfqq);
2441 cfq_clear_cfqq_must_alloc_slice(cfqq);
2442 cfq_clear_cfqq_fifo_expire(cfqq);
2443 cfq_mark_cfqq_slice_new(cfqq);
2444
2445 cfq_del_timer(cfqd, cfqq);
22e2c507
JA
2446 }
2447
2448 cfqd->active_queue = cfqq;
2449}
2450
7b14e3b5
JA
2451/*
2452 * current cfqq expired its slice (or was too idle), select new one
2453 */
2454static void
2455__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e5ff082e 2456 bool timed_out)
7b14e3b5 2457{
7b679138
JA
2458 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2459
7b14e3b5 2460 if (cfq_cfqq_wait_request(cfqq))
812df48d 2461 cfq_del_timer(cfqd, cfqq);
7b14e3b5 2462
7b14e3b5 2463 cfq_clear_cfqq_wait_request(cfqq);
f75edf2d 2464 cfq_clear_cfqq_wait_busy(cfqq);
7b14e3b5 2465
ae54abed
SL
2466 /*
2467 * If this cfqq is shared between multiple processes, check to
2468 * make sure that those processes are still issuing I/Os within
2469 * the mean seek distance. If not, it may be time to break the
2470 * queues apart again.
2471 */
2472 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2473 cfq_mark_cfqq_split_coop(cfqq);
2474
7b14e3b5 2475 /*
6084cdda 2476 * store what was left of this slice, if the queue idled/timed out
7b14e3b5 2477 */
c553f8e3
SL
2478 if (timed_out) {
2479 if (cfq_cfqq_slice_new(cfqq))
ba5bd520 2480 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
c553f8e3
SL
2481 else
2482 cfqq->slice_resid = cfqq->slice_end - jiffies;
7b679138
JA
2483 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
2484 }
7b14e3b5 2485
e5ff082e 2486 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
dae739eb 2487
f04a6424
VG
2488 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2489 cfq_del_cfqq_rr(cfqd, cfqq);
2490
edd75ffd 2491 cfq_resort_rr_list(cfqd, cfqq);
7b14e3b5
JA
2492
2493 if (cfqq == cfqd->active_queue)
2494 cfqd->active_queue = NULL;
2495
2496 if (cfqd->active_cic) {
11a3122f 2497 put_io_context(cfqd->active_cic->icq.ioc);
7b14e3b5
JA
2498 cfqd->active_cic = NULL;
2499 }
7b14e3b5
JA
2500}
2501
e5ff082e 2502static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
7b14e3b5
JA
2503{
2504 struct cfq_queue *cfqq = cfqd->active_queue;
2505
2506 if (cfqq)
e5ff082e 2507 __cfq_slice_expired(cfqd, cfqq, timed_out);
7b14e3b5
JA
2508}
2509
498d3aa2
JA
2510/*
2511 * Get next queue for service. Unless we have a queue preemption,
2512 * we'll simply select the first cfqq in the service tree.
2513 */
6d048f53 2514static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
22e2c507 2515{
34b98d03
VG
2516 struct cfq_rb_root *st = st_for(cfqd->serving_group,
2517 cfqd->serving_wl_class, cfqd->serving_wl_type);
d9e7620e 2518
f04a6424
VG
2519 if (!cfqd->rq_queued)
2520 return NULL;
2521
1fa8f6d6 2522 /* There is nothing to dispatch */
34b98d03 2523 if (!st)
1fa8f6d6 2524 return NULL;
34b98d03 2525 if (RB_EMPTY_ROOT(&st->rb))
c0324a02 2526 return NULL;
34b98d03 2527 return cfq_rb_first(st);
6d048f53
JA
2528}
2529
f04a6424
VG
2530static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2531{
25fb5169 2532 struct cfq_group *cfqg;
f04a6424
VG
2533 struct cfq_queue *cfqq;
2534 int i, j;
2535 struct cfq_rb_root *st;
2536
2537 if (!cfqd->rq_queued)
2538 return NULL;
2539
25fb5169
VG
2540 cfqg = cfq_get_next_cfqg(cfqd);
2541 if (!cfqg)
2542 return NULL;
2543
f04a6424
VG
2544 for_each_cfqg_st(cfqg, i, j, st)
2545 if ((cfqq = cfq_rb_first(st)) != NULL)
2546 return cfqq;
2547 return NULL;
2548}
2549
498d3aa2
JA
2550/*
2551 * Get and set a new active queue for service.
2552 */
a36e71f9
JA
2553static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2554 struct cfq_queue *cfqq)
6d048f53 2555{
e00ef799 2556 if (!cfqq)
a36e71f9 2557 cfqq = cfq_get_next_queue(cfqd);
6d048f53 2558
22e2c507 2559 __cfq_set_active_queue(cfqd, cfqq);
3b18152c 2560 return cfqq;
22e2c507
JA
2561}
2562
d9e7620e
JA
2563static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2564 struct request *rq)
2565{
83096ebf
TH
2566 if (blk_rq_pos(rq) >= cfqd->last_position)
2567 return blk_rq_pos(rq) - cfqd->last_position;
d9e7620e 2568 else
83096ebf 2569 return cfqd->last_position - blk_rq_pos(rq);
d9e7620e
JA
2570}
2571
b2c18e1e 2572static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e9ce335d 2573 struct request *rq)
6d048f53 2574{
e9ce335d 2575 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
6d048f53
JA
2576}
2577
a36e71f9
JA
2578static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2579 struct cfq_queue *cur_cfqq)
2580{
f2d1f0ae 2581 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
a36e71f9
JA
2582 struct rb_node *parent, *node;
2583 struct cfq_queue *__cfqq;
2584 sector_t sector = cfqd->last_position;
2585
2586 if (RB_EMPTY_ROOT(root))
2587 return NULL;
2588
2589 /*
2590 * First, if we find a request starting at the end of the last
2591 * request, choose it.
2592 */
f2d1f0ae 2593 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
a36e71f9
JA
2594 if (__cfqq)
2595 return __cfqq;
2596
2597 /*
2598 * If the exact sector wasn't found, the parent of the NULL leaf
2599 * will contain the closest sector.
2600 */
2601 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
e9ce335d 2602 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
2603 return __cfqq;
2604
2e46e8b2 2605 if (blk_rq_pos(__cfqq->next_rq) < sector)
a36e71f9
JA
2606 node = rb_next(&__cfqq->p_node);
2607 else
2608 node = rb_prev(&__cfqq->p_node);
2609 if (!node)
2610 return NULL;
2611
2612 __cfqq = rb_entry(node, struct cfq_queue, p_node);
e9ce335d 2613 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
2614 return __cfqq;
2615
2616 return NULL;
2617}
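/*
 * Note (illustrative): when the exact sector is absent, the parent left
 * by the failed lookup is the nearest node on one side, and a single
 * rb_next()/rb_prev() step checks the neighbor on the other side. A
 * candidate only qualifies if its next request lies within
 * CFQQ_CLOSE_THR of cfqd->last_position, mirroring cfq_rq_close():
 */
#if 0	/* example only */
static int within_close_thr(unsigned long long last_pos,
			    unsigned long long rq_pos,
			    unsigned long long close_thr)
{
	unsigned long long dist = rq_pos >= last_pos ? rq_pos - last_pos
						     : last_pos - rq_pos;
	return dist <= close_thr;
}
#endif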
2618
2619/*
2620 * cfqd - obvious
2621 * cur_cfqq - passed in so that we don't decide that the current queue is
2622 * closely cooperating with itself.
2623 *
 2624 * So, basically we're assuming that cur_cfqq has dispatched at least
2625 * one request, and that cfqd->last_position reflects a position on the disk
2626 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
2627 * assumption.
2628 */
2629static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
b3b6d040 2630 struct cfq_queue *cur_cfqq)
6d048f53 2631{
a36e71f9
JA
2632 struct cfq_queue *cfqq;
2633
39c01b21
DS
2634 if (cfq_class_idle(cur_cfqq))
2635 return NULL;
e6c5bc73
JM
2636 if (!cfq_cfqq_sync(cur_cfqq))
2637 return NULL;
2638 if (CFQQ_SEEKY(cur_cfqq))
2639 return NULL;
2640
b9d8f4c7
GJ
2641 /*
2642 * Don't search priority tree if it's the only queue in the group.
2643 */
2644 if (cur_cfqq->cfqg->nr_cfqq == 1)
2645 return NULL;
2646
6d048f53 2647 /*
d9e7620e
JA
2648 * We should notice if some of the queues are cooperating, eg
2649 * working closely on the same area of the disk. In that case,
2650 * we can group them together and don't waste time idling.
6d048f53 2651 */
a36e71f9
JA
2652 cfqq = cfqq_close(cfqd, cur_cfqq);
2653 if (!cfqq)
2654 return NULL;
2655
8682e1f1
VG
2656 /* If new queue belongs to different cfq_group, don't choose it */
2657 if (cur_cfqq->cfqg != cfqq->cfqg)
2658 return NULL;
2659
df5fe3e8
JM
2660 /*
2661 * It only makes sense to merge sync queues.
2662 */
2663 if (!cfq_cfqq_sync(cfqq))
2664 return NULL;
e6c5bc73
JM
2665 if (CFQQ_SEEKY(cfqq))
2666 return NULL;
df5fe3e8 2667
c0324a02
CZ
2668 /*
2669 * Do not merge queues of different priority classes
2670 */
2671 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2672 return NULL;
2673
a36e71f9 2674 return cfqq;
6d048f53
JA
2675}
2676
a6d44e98
CZ
2677/*
2678 * Determine whether we should enforce idle window for this queue.
2679 */
2680
2681static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2682{
3bf10fea 2683 enum wl_class_t wl_class = cfqq_class(cfqq);
34b98d03 2684 struct cfq_rb_root *st = cfqq->service_tree;
a6d44e98 2685
34b98d03
VG
2686 BUG_ON(!st);
2687 BUG_ON(!st->count);
f04a6424 2688
b6508c16
VG
2689 if (!cfqd->cfq_slice_idle)
2690 return false;
2691
a6d44e98 2692 /* We never do for idle class queues. */
3bf10fea 2693 if (wl_class == IDLE_WORKLOAD)
a6d44e98
CZ
2694 return false;
2695
2696 /* We do for queues that were marked with idle window flag. */
3c764b7a
SL
2697 if (cfq_cfqq_idle_window(cfqq) &&
2698 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
a6d44e98
CZ
2699 return true;
2700
2701 /*
2702 * Otherwise, we do only if they are the last ones
2703 * in their service tree.
2704 */
34b98d03
VG
2705 if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
2706 !cfq_io_thinktime_big(cfqd, &st->ttime, false))
c1e44756 2707 return true;
34b98d03 2708 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
c1e44756 2709 return false;
a6d44e98
CZ
2710}
2711
6d048f53 2712static void cfq_arm_slice_timer(struct cfq_data *cfqd)
22e2c507 2713{
1792669c 2714 struct cfq_queue *cfqq = cfqd->active_queue;
c5869807 2715 struct cfq_io_cq *cic;
80bdf0c7 2716 unsigned long sl, group_idle = 0;
7b14e3b5 2717
a68bbddb 2718 /*
f7d7b7a7
JA
2719 * SSD device without seek penalty, disable idling. But only do so
2720 * for devices that support queuing, otherwise we still have a problem
2721 * with sync vs async workloads.
a68bbddb 2722 */
f7d7b7a7 2723 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
a68bbddb
JA
2724 return;
2725
dd67d051 2726 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
6d048f53 2727 WARN_ON(cfq_cfqq_slice_new(cfqq));
22e2c507
JA
2728
2729 /*
2730 * idle is disabled, either manually or by past process history
2731 */
80bdf0c7
VG
2732 if (!cfq_should_idle(cfqd, cfqq)) {
2733 /* no queue idling. Check for group idling */
2734 if (cfqd->cfq_group_idle)
2735 group_idle = cfqd->cfq_group_idle;
2736 else
2737 return;
2738 }
6d048f53 2739
7b679138 2740 /*
8e550632 2741 * still active requests from this queue, don't idle
7b679138 2742 */
8e550632 2743 if (cfqq->dispatched)
7b679138
JA
2744 return;
2745
22e2c507
JA
2746 /*
2747 * task has exited, don't wait
2748 */
206dc69b 2749 cic = cfqd->active_cic;
f6e8d01b 2750 if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
6d048f53
JA
2751 return;
2752
355b659c
CZ
2753 /*
2754 * If our average think time is larger than the remaining time
2755 * slice, then don't idle. This avoids overrunning the allotted
2756 * time slice.
2757 */
383cd721
SL
2758 if (sample_valid(cic->ttime.ttime_samples) &&
2759 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
fd16d263 2760 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
383cd721 2761 cic->ttime.ttime_mean);
355b659c 2762 return;
b1ffe737 2763 }
355b659c 2764
80bdf0c7
VG
2765 /* There are other queues in the group, don't do group idle */
2766 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2767 return;
2768
3b18152c 2769 cfq_mark_cfqq_wait_request(cfqq);
22e2c507 2770
80bdf0c7
VG
2771 if (group_idle)
2772 sl = cfqd->cfq_group_idle;
2773 else
2774 sl = cfqd->cfq_slice_idle;
206dc69b 2775
7b14e3b5 2776 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
155fead9 2777 cfqg_stats_set_start_idle_time(cfqq->cfqg);
80bdf0c7
VG
2778 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2779 group_idle ? 1 : 0);
1da177e4
LT
2780}
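/*
 * Worked example (illustrative numbers): with 8 ticks left in the slice
 * and a mean think time of 12 ticks, idling would almost certainly
 * overrun the slice, so the check above bails out; with a mean think
 * time of 3 ticks, arming the idle timer is worthwhile.
 */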
2781
498d3aa2
JA
2782/*
2783 * Move request from internal lists to the request queue dispatch list.
2784 */
165125e1 2785static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1da177e4 2786{
3ed9a296 2787 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 2788 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 2789
7b679138
JA
2790 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2791
06d21886 2792 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
5380a101 2793 cfq_remove_request(rq);
6d048f53 2794 cfqq->dispatched++;
80bdf0c7 2795 (RQ_CFQG(rq))->dispatched++;
5380a101 2796 elv_dispatch_sort(q, rq);
3ed9a296 2797
53c583d2 2798 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
c4e7893e 2799 cfqq->nr_sectors += blk_rq_sectors(rq);
155fead9 2800 cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
1da177e4
LT
2801}
2802
2803/*
2804 * return expired entry, or NULL to just start from scratch in rbtree
2805 */
febffd61 2806static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1da177e4 2807{
30996f40 2808 struct request *rq = NULL;
1da177e4 2809
3b18152c 2810 if (cfq_cfqq_fifo_expire(cfqq))
1da177e4 2811 return NULL;
cb887411
JA
2812
2813 cfq_mark_cfqq_fifo_expire(cfqq);
2814
89850f7e
JA
2815 if (list_empty(&cfqq->fifo))
2816 return NULL;
1da177e4 2817
89850f7e 2818 rq = rq_entry_fifo(cfqq->fifo.next);
8b4922d3 2819 if (time_before(jiffies, rq->fifo_time))
7b679138 2820 rq = NULL;
1da177e4 2821
30996f40 2822 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
6d048f53 2823 return rq;
1da177e4
LT
2824}
2825
22e2c507
JA
2826static inline int
2827cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2828{
2829 const int base_rq = cfqd->cfq_slice_async_rq;
1da177e4 2830
22e2c507 2831 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1da177e4 2832
b9f8ce05 2833 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
1da177e4
LT
2834}
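/*
 * Worked example: with the default cfq_slice_async_rq of 2 and
 * IOPRIO_BE_NR of 8, a queue at ioprio 4 may dispatch up to
 * 2 * 2 * (8 - 4) = 16 requests per slice, while the lowest best-effort
 * priority (7) gets 2 * 2 * 1 = 4.
 */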
2835
df5fe3e8
JM
2836/*
2837 * Must be called with the queue_lock held.
2838 */
2839static int cfqq_process_refs(struct cfq_queue *cfqq)
2840{
2841 int process_refs, io_refs;
2842
2843 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
30d7b944 2844 process_refs = cfqq->ref - io_refs;
df5fe3e8
JM
2845 BUG_ON(process_refs < 0);
2846 return process_refs;
2847}
2848
2849static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2850{
e6c5bc73 2851 int process_refs, new_process_refs;
df5fe3e8
JM
2852 struct cfq_queue *__cfqq;
2853
c10b61f0
JM
2854 /*
2855 * If there are no process references on the new_cfqq, then it is
2856 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2857 * chain may have dropped their last reference (not just their
2858 * last process reference).
2859 */
2860 if (!cfqq_process_refs(new_cfqq))
2861 return;
2862
df5fe3e8
JM
2863 /* Avoid a circular list and skip interim queue merges */
2864 while ((__cfqq = new_cfqq->new_cfqq)) {
2865 if (__cfqq == cfqq)
2866 return;
2867 new_cfqq = __cfqq;
2868 }
2869
2870 process_refs = cfqq_process_refs(cfqq);
c10b61f0 2871 new_process_refs = cfqq_process_refs(new_cfqq);
df5fe3e8
JM
2872 /*
2873 * If the process for the cfqq has gone away, there is no
2874 * sense in merging the queues.
2875 */
c10b61f0 2876 if (process_refs == 0 || new_process_refs == 0)
df5fe3e8
JM
2877 return;
2878
e6c5bc73
JM
2879 /*
2880 * Merge in the direction of the lesser amount of work.
2881 */
e6c5bc73
JM
2882 if (new_process_refs >= process_refs) {
2883 cfqq->new_cfqq = new_cfqq;
30d7b944 2884 new_cfqq->ref += process_refs;
e6c5bc73
JM
2885 } else {
2886 new_cfqq->new_cfqq = cfqq;
30d7b944 2887 cfqq->ref += new_process_refs;
e6c5bc73 2888 }
df5fe3e8
JM
2889}
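/*
 * Worked example (illustrative numbers): with cfqq_process_refs(cfqq)
 * == 1 and cfqq_process_refs(new_cfqq) == 3, the branch above sets
 * cfqq->new_cfqq = new_cfqq and adds the single reference to new_cfqq,
 * so the less busy side follows the busier queue.
 */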
2890
6d816ec7 2891static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
3bf10fea 2892 struct cfq_group *cfqg, enum wl_class_t wl_class)
718eee05
CZ
2893{
2894 struct cfq_queue *queue;
2895 int i;
2896 bool key_valid = false;
2897 unsigned long lowest_key = 0;
2898 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2899
65b32a57
VG
2900 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2901 /* select the one with lowest rb_key */
34b98d03 2902 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
718eee05
CZ
2903 if (queue &&
2904 (!key_valid || time_before(queue->rb_key, lowest_key))) {
2905 lowest_key = queue->rb_key;
2906 cur_best = i;
2907 key_valid = true;
2908 }
2909 }
2910
2911 return cur_best;
2912}
2913
6d816ec7
VG
2914static void
2915choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
718eee05 2916{
718eee05
CZ
2917 unsigned slice;
2918 unsigned count;
cdb16e8f 2919 struct cfq_rb_root *st;
58ff82f3 2920 unsigned group_slice;
4d2ceea4 2921 enum wl_class_t original_class = cfqd->serving_wl_class;
1fa8f6d6 2922
718eee05 2923 /* Choose next priority. RT > BE > IDLE */
58ff82f3 2924 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
4d2ceea4 2925 cfqd->serving_wl_class = RT_WORKLOAD;
58ff82f3 2926 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
4d2ceea4 2927 cfqd->serving_wl_class = BE_WORKLOAD;
718eee05 2928 else {
4d2ceea4 2929 cfqd->serving_wl_class = IDLE_WORKLOAD;
718eee05
CZ
2930 cfqd->workload_expires = jiffies + 1;
2931 return;
2932 }
2933
4d2ceea4 2934 if (original_class != cfqd->serving_wl_class)
e4ea0c16
SL
2935 goto new_workload;
2936
718eee05
CZ
2937 /*
2938 * For RT and BE, we have to choose also the type
2939 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2940 * expiration time
2941 */
34b98d03 2942 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
cdb16e8f 2943 count = st->count;
718eee05
CZ
2944
2945 /*
65b32a57 2946 * check workload expiration, and that we still have other queues ready
718eee05 2947 */
65b32a57 2948 if (count && !time_after(jiffies, cfqd->workload_expires))
718eee05
CZ
2949 return;
2950
e4ea0c16 2951new_workload:
718eee05 2952 /* otherwise select new workload type */
6d816ec7 2953 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
4d2ceea4 2954 cfqd->serving_wl_class);
34b98d03 2955 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
cdb16e8f 2956 count = st->count;
718eee05
CZ
2957
2958 /*
2959 * the workload slice is computed as a fraction of target latency
2960 * proportional to the number of queues in that workload, over
2961 * all the queues in the same priority class
2962 */
58ff82f3
VG
2963 group_slice = cfq_group_slice(cfqd, cfqg);
2964
2965 slice = group_slice * count /
4d2ceea4
VG
2966 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
2967 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
3bf10fea 2968 cfqg));
718eee05 2969
4d2ceea4 2970 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
f26bd1f0
VG
2971 unsigned int tmp;
2972
2973 /*
 2974 * Async queues are currently system wide. Just taking the
 2975 * proportion of queues within the same group will lead to a higher
 2976 * async ratio system wide, as the root group is generally going
 2977 * to have a higher weight. A more accurate approach would be to
 2978 * calculate the system wide async/sync ratio.
2979 */
5bf14c07
TM
2980 tmp = cfqd->cfq_target_latency *
2981 cfqg_busy_async_queues(cfqd, cfqg);
f26bd1f0
VG
2982 tmp = tmp/cfqd->busy_queues;
2983 slice = min_t(unsigned, slice, tmp);
2984
718eee05
CZ
2985 /* async workload slice is scaled down according to
2986 * the sync/async slice ratio. */
2987 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
f26bd1f0 2988 } else
718eee05
CZ
2989 /* sync workload slice is at least 2 * cfq_slice_idle */
2990 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2991
2992 slice = max_t(unsigned, slice, CFQ_MIN_TT);
b1ffe737 2993 cfq_log(cfqd, "workload slice:%d", slice);
718eee05
CZ
2994 cfqd->workload_expires = jiffies + slice;
2995}
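/*
 * Worked example (illustrative numbers): with a group slice of 300 ms,
 * 2 queues in the chosen sync workload and 6 busy queues of that class
 * in the group, the workload slice is 300 * 2 / 6 = 100 ms before the
 * next type is selected; an async workload is further scaled by the
 * cfq_slice[0]/cfq_slice[1] ratio as above.
 */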
2996
1fa8f6d6
VG
2997static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2998{
2999 struct cfq_rb_root *st = &cfqd->grp_service_tree;
25bc6b07 3000 struct cfq_group *cfqg;
1fa8f6d6
VG
3001
3002 if (RB_EMPTY_ROOT(&st->rb))
3003 return NULL;
25bc6b07 3004 cfqg = cfq_rb_first_group(st);
25bc6b07
VG
3005 update_min_vdisktime(st);
3006 return cfqg;
1fa8f6d6
VG
3007}
3008
cdb16e8f
VG
3009static void cfq_choose_cfqg(struct cfq_data *cfqd)
3010{
1fa8f6d6
VG
3011 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
3012
3013 cfqd->serving_group = cfqg;
dae739eb
VG
3014
3015 /* Restore the workload type data */
4d2ceea4
VG
3016 if (cfqg->saved_wl_slice) {
3017 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
3018 cfqd->serving_wl_type = cfqg->saved_wl_type;
3019 cfqd->serving_wl_class = cfqg->saved_wl_class;
66ae2919
GJ
3020 } else
3021 cfqd->workload_expires = jiffies - 1;
3022
6d816ec7 3023 choose_wl_class_and_type(cfqd, cfqg);
cdb16e8f
VG
3024}
3025
22e2c507 3026/*
498d3aa2
JA
3027 * Select a queue for service. If we have a current active queue,
3028 * check whether to continue servicing it, or retrieve and set a new one.
22e2c507 3029 */
1b5ed5e1 3030static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1da177e4 3031{
a36e71f9 3032 struct cfq_queue *cfqq, *new_cfqq = NULL;
1da177e4 3033
22e2c507
JA
3034 cfqq = cfqd->active_queue;
3035 if (!cfqq)
3036 goto new_queue;
1da177e4 3037
f04a6424
VG
3038 if (!cfqd->rq_queued)
3039 return NULL;
c244bb50
VG
3040
3041 /*
3042 * We were waiting for group to get backlogged. Expire the queue
3043 */
3044 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3045 goto expire;
3046
22e2c507 3047 /*
6d048f53 3048 * The active queue has run out of time, expire it and select new.
22e2c507 3049 */
7667aa06
VG
3050 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3051 /*
3052 * If slice had not expired at the completion of last request
3053 * we might not have turned on wait_busy flag. Don't expire
3054 * the queue yet. Allow the group to get backlogged.
3055 *
3056 * The very fact that we have used the slice, that means we
3057 * have been idling all along on this queue and it should be
3058 * ok to wait for this request to complete.
3059 */
82bbbf28
VG
3060 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3061 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3062 cfqq = NULL;
7667aa06 3063 goto keep_queue;
82bbbf28 3064 } else
80bdf0c7 3065 goto check_group_idle;
7667aa06 3066 }
1da177e4 3067
22e2c507 3068 /*
6d048f53
JA
3069 * The active queue has requests and isn't expired, allow it to
3070 * dispatch.
22e2c507 3071 */
dd67d051 3072 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 3073 goto keep_queue;
6d048f53 3074
a36e71f9
JA
3075 /*
3076 * If another queue has a request waiting within our mean seek
3077 * distance, let it run. The expire code will check for close
3078 * cooperators and put the close queue at the front of the service
df5fe3e8 3079 * tree. If possible, merge the expiring queue with the new cfqq.
a36e71f9 3080 */
b3b6d040 3081 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
df5fe3e8
JM
3082 if (new_cfqq) {
3083 if (!cfqq->new_cfqq)
3084 cfq_setup_merge(cfqq, new_cfqq);
a36e71f9 3085 goto expire;
df5fe3e8 3086 }
a36e71f9 3087
6d048f53
JA
3088 /*
3089 * No requests pending. If the active queue still has requests in
3090 * flight or is idling for a new request, allow either of these
3091 * conditions to happen (or time out) before selecting a new queue.
3092 */
80bdf0c7
VG
3093 if (timer_pending(&cfqd->idle_slice_timer)) {
3094 cfqq = NULL;
3095 goto keep_queue;
3096 }
3097
8e1ac665
SL
3098 /*
3099 * This is a deep seek queue, but the device is much faster than
3100 * the queue can deliver, don't idle
3101 **/
3102 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3103 (cfq_cfqq_slice_new(cfqq) ||
3104 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
3105 cfq_clear_cfqq_deep(cfqq);
3106 cfq_clear_cfqq_idle_window(cfqq);
3107 }
3108
80bdf0c7
VG
3109 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3110 cfqq = NULL;
3111 goto keep_queue;
3112 }
3113
3114 /*
3115 * If group idle is enabled and there are requests dispatched from
3116 * this group, wait for requests to complete.
3117 */
3118check_group_idle:
7700fc4f
SL
3119 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3120 cfqq->cfqg->dispatched &&
3121 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
caaa5f9f
JA
3122 cfqq = NULL;
3123 goto keep_queue;
22e2c507
JA
3124 }
3125
3b18152c 3126expire:
e5ff082e 3127 cfq_slice_expired(cfqd, 0);
3b18152c 3128new_queue:
718eee05
CZ
3129 /*
3130 * Current queue expired. Check if we have to switch to a new
3131 * service tree
3132 */
3133 if (!new_cfqq)
cdb16e8f 3134 cfq_choose_cfqg(cfqd);
718eee05 3135
a36e71f9 3136 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
22e2c507 3137keep_queue:
3b18152c 3138 return cfqq;
22e2c507
JA
3139}
3140
febffd61 3141static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
d9e7620e
JA
3142{
3143 int dispatched = 0;
3144
3145 while (cfqq->next_rq) {
3146 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3147 dispatched++;
3148 }
3149
3150 BUG_ON(!list_empty(&cfqq->fifo));
f04a6424
VG
3151
3152 /* By default cfqq is not expired if it is empty. Do it explicitly */
e5ff082e 3153 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
d9e7620e
JA
3154 return dispatched;
3155}
3156
498d3aa2
JA
3157/*
3158 * Drain our current requests. Used for barriers and when switching
3159 * io schedulers on-the-fly.
3160 */
d9e7620e 3161static int cfq_forced_dispatch(struct cfq_data *cfqd)
1b5ed5e1 3162{
0871714e 3163 struct cfq_queue *cfqq;
d9e7620e 3164 int dispatched = 0;
cdb16e8f 3165
3440c49f 3166 /* Expire the timeslice of the current active queue first */
e5ff082e 3167 cfq_slice_expired(cfqd, 0);
3440c49f
DS
3168 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3169 __cfq_set_active_queue(cfqd, cfqq);
f04a6424 3170 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3440c49f 3171 }
1b5ed5e1 3172
1b5ed5e1
TH
3173 BUG_ON(cfqd->busy_queues);
3174
6923715a 3175 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1b5ed5e1
TH
3176 return dispatched;
3177}
3178
abc3c744
SL
3179static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3180 struct cfq_queue *cfqq)
3181{
3182 /* the queue hasn't finished any request, can't estimate */
3183 if (cfq_cfqq_slice_new(cfqq))
c1e44756 3184 return true;
abc3c744
SL
3185 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
3186 cfqq->slice_end))
c1e44756 3187 return true;
abc3c744 3188
c1e44756 3189 return false;
abc3c744
SL
3190}
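/*
 * Worked example (illustrative numbers): with cfq_slice_idle == 8 and 4
 * requests already dispatched, the queue counts as "used soon" once
 * jiffies + 8 * 4 passes slice_end, i.e. the in-flight work alone is
 * expected to consume the remainder of the slice.
 */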
3191
0b182d61 3192static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2f5cb738 3193{
2f5cb738 3194 unsigned int max_dispatch;
22e2c507 3195
5ad531db
JA
3196 /*
3197 * Drain async requests before we start sync IO
3198 */
53c583d2 3199 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
0b182d61 3200 return false;
5ad531db 3201
2f5cb738
JA
3202 /*
3203 * If this is an async queue and we have sync IO in flight, let it wait
3204 */
53c583d2 3205 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
0b182d61 3206 return false;
2f5cb738 3207
abc3c744 3208 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2f5cb738
JA
3209 if (cfq_class_idle(cfqq))
3210 max_dispatch = 1;
b4878f24 3211
2f5cb738
JA
3212 /*
3213 * Does this cfqq already have too much IO in flight?
3214 */
3215 if (cfqq->dispatched >= max_dispatch) {
ef8a41df 3216 bool promote_sync = false;
2f5cb738
JA
3217 /*
3218 * idle queue must always only have a single IO in flight
3219 */
3ed9a296 3220 if (cfq_class_idle(cfqq))
0b182d61 3221 return false;
3ed9a296 3222
ef8a41df 3223 /*
c4ade94f
LS
 3224 * If there is only one sync queue,
 3225 * we can ignore the async queue here and give the sync
ef8a41df
SL
 3226 * queue no dispatch limit. The reason is that a sync queue can
 3227 * preempt an async queue, so limiting the sync queue doesn't make
 3228 * sense. This is useful for the aiostress test.
3229 */
c4ade94f
LS
3230 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3231 promote_sync = true;
ef8a41df 3232
2f5cb738
JA
3233 /*
3234 * We have other queues, don't allow more IO from this one
3235 */
ef8a41df
SL
3236 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3237 !promote_sync)
0b182d61 3238 return false;
9ede209e 3239
365722bb 3240 /*
474b18cc 3241 * Sole queue user, no limit
365722bb 3242 */
ef8a41df 3243 if (cfqd->busy_queues == 1 || promote_sync)
abc3c744
SL
3244 max_dispatch = -1;
3245 else
3246 /*
3247 * Normally we start throttling cfqq when cfq_quantum/2
3248 * requests have been dispatched. But we can drive
3249 * deeper queue depths at the beginning of slice
3250 * subjected to upper limit of cfq_quantum.
3251 * */
3252 max_dispatch = cfqd->cfq_quantum;
8e296755
JA
3253 }
3254
3255 /*
3256 * Async queues must wait a bit before being allowed dispatch.
3257 * We also ramp up the dispatch depth gradually for async IO,
3258 * based on the last sync IO we serviced
3259 */
963b72fc 3260 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
573412b2 3261 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
8e296755 3262 unsigned int depth;
365722bb 3263
61f0c1dc 3264 depth = last_sync / cfqd->cfq_slice[1];
e00c54c3
JA
3265 if (!depth && !cfqq->dispatched)
3266 depth = 1;
8e296755
JA
3267 if (depth < max_dispatch)
3268 max_dispatch = depth;
2f5cb738 3269 }
3ed9a296 3270
0b182d61
JA
3271 /*
3272 * If we're below the current max, allow a dispatch
3273 */
3274 return cfqq->dispatched < max_dispatch;
3275}
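/*
 * Worked example (illustrative numbers): for an async queue with
 * cfq_latency set, depth = (jiffies - last_delayed_sync) / cfq_slice[1].
 * With a sync slice of 100 ticks, the allowed depth grows by one for
 * every 100 ticks since the last delayed sync I/O (with a floor of one
 * request for an idle queue), capped by the max_dispatch above.
 */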
3276
3277/*
3278 * Dispatch a request from cfqq, moving them to the request queue
3279 * dispatch list.
3280 */
3281static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3282{
3283 struct request *rq;
3284
3285 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3286
3287 if (!cfq_may_dispatch(cfqd, cfqq))
3288 return false;
3289
3290 /*
3291 * follow expired path, else get first next available
3292 */
3293 rq = cfq_check_fifo(cfqq);
3294 if (!rq)
3295 rq = cfqq->next_rq;
3296
3297 /*
3298 * insert request into driver dispatch list
3299 */
3300 cfq_dispatch_insert(cfqd->queue, rq);
3301
3302 if (!cfqd->active_cic) {
c5869807 3303 struct cfq_io_cq *cic = RQ_CIC(rq);
0b182d61 3304
c5869807 3305 atomic_long_inc(&cic->icq.ioc->refcount);
0b182d61
JA
3306 cfqd->active_cic = cic;
3307 }
3308
3309 return true;
3310}
3311
3312/*
3313 * Find the cfqq that we need to service and move a request from that to the
3314 * dispatch list
3315 */
3316static int cfq_dispatch_requests(struct request_queue *q, int force)
3317{
3318 struct cfq_data *cfqd = q->elevator->elevator_data;
3319 struct cfq_queue *cfqq;
3320
3321 if (!cfqd->busy_queues)
3322 return 0;
3323
3324 if (unlikely(force))
3325 return cfq_forced_dispatch(cfqd);
3326
3327 cfqq = cfq_select_queue(cfqd);
3328 if (!cfqq)
8e296755
JA
3329 return 0;
3330
2f5cb738 3331 /*
0b182d61 3332 * Dispatch a request from this cfqq, if it is allowed
2f5cb738 3333 */
0b182d61
JA
3334 if (!cfq_dispatch_request(cfqd, cfqq))
3335 return 0;
3336
2f5cb738 3337 cfqq->slice_dispatch++;
b029195d 3338 cfq_clear_cfqq_must_dispatch(cfqq);
22e2c507 3339
2f5cb738
JA
3340 /*
3341	 * expire an async queue immediately if it has used up its slice. idle
3342	 * queues always expire after 1 dispatch round.
3343 */
3344 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3345 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3346 cfq_class_idle(cfqq))) {
3347 cfqq->slice_end = jiffies + 1;
e5ff082e 3348 cfq_slice_expired(cfqd, 0);
1da177e4
LT
3349 }
3350
b217a903 3351 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2f5cb738 3352 return 1;
1da177e4
LT
3353}
3354
1da177e4 3355/*
5e705374
JA
3356 * task holds one reference to the queue, dropped when task exits. each rq
3357 * in-flight on this queue also holds a reference, dropped when rq is freed.
1da177e4 3358 *
b1c35769 3359 * Each cfq queue took a reference on the parent group. Drop it now.
1da177e4
LT
3360 * queue lock must be held here.
3361 */
3362static void cfq_put_queue(struct cfq_queue *cfqq)
3363{
22e2c507 3364 struct cfq_data *cfqd = cfqq->cfqd;
0bbfeb83 3365 struct cfq_group *cfqg;
22e2c507 3366
30d7b944 3367 BUG_ON(cfqq->ref <= 0);
1da177e4 3368
30d7b944
SL
3369 cfqq->ref--;
3370 if (cfqq->ref)
1da177e4
LT
3371 return;
3372
7b679138 3373 cfq_log_cfqq(cfqd, cfqq, "put_queue");
1da177e4 3374 BUG_ON(rb_first(&cfqq->sort_list));
22e2c507 3375 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
b1c35769 3376 cfqg = cfqq->cfqg;
1da177e4 3377
28f95cbc 3378 if (unlikely(cfqd->active_queue == cfqq)) {
e5ff082e 3379 __cfq_slice_expired(cfqd, cfqq, 0);
23e018a1 3380 cfq_schedule_dispatch(cfqd);
28f95cbc 3381 }
22e2c507 3382
f04a6424 3383 BUG_ON(cfq_cfqq_on_rr(cfqq));
1da177e4 3384 kmem_cache_free(cfq_pool, cfqq);
eb7d8c07 3385 cfqg_put(cfqg);
1da177e4
LT
3386}
3387
d02a2c07 3388static void cfq_put_cooperator(struct cfq_queue *cfqq)
1da177e4 3389{
df5fe3e8
JM
3390 struct cfq_queue *__cfqq, *next;
3391
df5fe3e8
JM
3392 /*
3393 * If this queue was scheduled to merge with another queue, be
3394 * sure to drop the reference taken on that queue (and others in
3395 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
3396 */
3397 __cfqq = cfqq->new_cfqq;
3398 while (__cfqq) {
3399 if (__cfqq == cfqq) {
3400 WARN(1, "cfqq->new_cfqq loop detected\n");
3401 break;
3402 }
3403 next = __cfqq->new_cfqq;
3404 cfq_put_queue(__cfqq);
3405 __cfqq = next;
3406 }
d02a2c07
SL
3407}
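
/*
 * Illustrative sketch (not part of the kernel source): releasing a reference
 * on every node of a singly linked merge chain while guarding against a loop
 * back to the head, as the function above does. The node type and the put()
 * policy are invented stand-ins for cfq_queue and cfq_put_queue().
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *new_next;	/* analogous to cfqq->new_cfqq */
	int ref;
};

static void put(struct node *n)
{
	if (--n->ref == 0)
		free(n);
}

static void put_chain(struct node *head)
{
	struct node *cur = head->new_next, *next;

	while (cur) {
		if (cur == head) {	/* loop: head belongs to our caller */
			fprintf(stderr, "merge chain loops, stopping\n");
			break;
		}
		next = cur->new_next;	/* read before put() may free cur */
		put(cur);
		cur = next;
	}
}

int main(void)
{
	struct node *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

	a->ref = b->ref = 1;
	a->new_next = b;
	b->new_next = a;	/* deliberately loop back to the head */
	put_chain(a);		/* puts b, detects the loop, stops */
	put(a);			/* the caller's own reference */
	return 0;
}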
3408
3409static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3410{
3411 if (unlikely(cfqq == cfqd->active_queue)) {
3412 __cfq_slice_expired(cfqd, cfqq, 0);
3413 cfq_schedule_dispatch(cfqd);
3414 }
3415
3416 cfq_put_cooperator(cfqq);
df5fe3e8 3417
89850f7e
JA
3418 cfq_put_queue(cfqq);
3419}
22e2c507 3420
9b84cacd
TH
3421static void cfq_init_icq(struct io_cq *icq)
3422{
3423 struct cfq_io_cq *cic = icq_to_cic(icq);
3424
3425 cic->ttime.last_end_request = jiffies;
3426}
3427
c5869807 3428static void cfq_exit_icq(struct io_cq *icq)
89850f7e 3429{
c5869807 3430 struct cfq_io_cq *cic = icq_to_cic(icq);
283287a5 3431 struct cfq_data *cfqd = cic_to_cfqd(cic);
4faa3c81 3432
ff6657c6
JA
3433 if (cic->cfqq[BLK_RW_ASYNC]) {
3434 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
3435 cic->cfqq[BLK_RW_ASYNC] = NULL;
12a05732
AV
3436 }
3437
ff6657c6
JA
3438 if (cic->cfqq[BLK_RW_SYNC]) {
3439 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
3440 cic->cfqq[BLK_RW_SYNC] = NULL;
12a05732 3441 }
89850f7e
JA
3442}
3443
abede6da 3444static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
22e2c507
JA
3445{
3446 struct task_struct *tsk = current;
3447 int ioprio_class;
3448
3b18152c 3449 if (!cfq_cfqq_prio_changed(cfqq))
22e2c507
JA
3450 return;
3451
598971bf 3452 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
22e2c507 3453 switch (ioprio_class) {
fe094d98
JA
3454 default:
3455 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3456 case IOPRIO_CLASS_NONE:
3457 /*
6d63c275 3458 * no prio set, inherit CPU scheduling settings
fe094d98
JA
3459 */
3460 cfqq->ioprio = task_nice_ioprio(tsk);
6d63c275 3461 cfqq->ioprio_class = task_nice_ioclass(tsk);
fe094d98
JA
3462 break;
3463 case IOPRIO_CLASS_RT:
598971bf 3464 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
3465 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3466 break;
3467 case IOPRIO_CLASS_BE:
598971bf 3468 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
3469 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3470 break;
3471 case IOPRIO_CLASS_IDLE:
3472 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3473 cfqq->ioprio = 7;
3474 cfq_clear_cfqq_idle_window(cfqq);
3475 break;
22e2c507
JA
3476 }
3477
3478 /*
3479 * keep track of original prio settings in case we have to temporarily
3480 * elevate the priority of this queue
3481 */
3482 cfqq->org_ioprio = cfqq->ioprio;
3b18152c 3483 cfq_clear_cfqq_prio_changed(cfqq);
22e2c507
JA
3484}
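
/*
 * Illustrative sketch (not part of the kernel source): how an io priority
 * value packs a class and a per-class level, matching the IOPRIO_PRIO_CLASS()
 * and IOPRIO_PRIO_DATA() accessors used above. The 13-bit shift matches
 * include/linux/ioprio.h of this era; treat it as an assumption here.
 */
#include <stdio.h>

#define PRIO_CLASS_SHIFT 13
#define PRIO_MASK ((1UL << PRIO_CLASS_SHIFT) - 1)

enum { CLASS_NONE, CLASS_RT, CLASS_BE, CLASS_IDLE };

int main(void)
{
	unsigned long ioprio = (CLASS_BE << PRIO_CLASS_SHIFT) | 4;

	printf("class=%lu data=%lu\n",
	       ioprio >> PRIO_CLASS_SHIFT,	/* IOPRIO_PRIO_CLASS() */
	       ioprio & PRIO_MASK);		/* IOPRIO_PRIO_DATA()  */
	return 0;				/* prints: class=2 data=4 */
}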
3485
598971bf 3486static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
22e2c507 3487{
598971bf 3488 int ioprio = cic->icq.ioc->ioprio;
bca4b914 3489 struct cfq_data *cfqd = cic_to_cfqd(cic);
478a82b0 3490 struct cfq_queue *cfqq;
35e6077c 3491
598971bf
TH
3492 /*
3493 * Check whether ioprio has changed. The condition may trigger
3494 * spuriously on a newly created cic but there's no harm.
3495 */
3496 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
caaa5f9f
JA
3497 return;
3498
ff6657c6 3499 cfqq = cic->cfqq[BLK_RW_ASYNC];
caaa5f9f
JA
3500 if (cfqq) {
3501 struct cfq_queue *new_cfqq;
abede6da
TH
3502 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
3503 GFP_ATOMIC);
caaa5f9f 3504 if (new_cfqq) {
ff6657c6 3505 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
caaa5f9f
JA
3506 cfq_put_queue(cfqq);
3507 }
22e2c507 3508 }
caaa5f9f 3509
ff6657c6 3510 cfqq = cic->cfqq[BLK_RW_SYNC];
caaa5f9f
JA
3511 if (cfqq)
3512 cfq_mark_cfqq_prio_changed(cfqq);
598971bf
TH
3513
3514 cic->ioprio = ioprio;
22e2c507
JA
3515}
3516
d5036d77 3517static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 3518 pid_t pid, bool is_sync)
d5036d77
JA
3519{
3520 RB_CLEAR_NODE(&cfqq->rb_node);
3521 RB_CLEAR_NODE(&cfqq->p_node);
3522 INIT_LIST_HEAD(&cfqq->fifo);
3523
30d7b944 3524 cfqq->ref = 0;
d5036d77
JA
3525 cfqq->cfqd = cfqd;
3526
3527 cfq_mark_cfqq_prio_changed(cfqq);
3528
3529 if (is_sync) {
3530 if (!cfq_class_idle(cfqq))
3531 cfq_mark_cfqq_idle_window(cfqq);
3532 cfq_mark_cfqq_sync(cfqq);
3533 }
3534 cfqq->pid = pid;
3535}
3536
24610333 3537#ifdef CONFIG_CFQ_GROUP_IOSCHED
598971bf 3538static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
24610333 3539{
bca4b914 3540 struct cfq_data *cfqd = cic_to_cfqd(cic);
598971bf
TH
3541 struct cfq_queue *sync_cfqq;
3542 uint64_t id;
24610333 3543
598971bf 3544 rcu_read_lock();
3c798398 3545 id = bio_blkcg(bio)->id;
598971bf 3546 rcu_read_unlock();
24610333 3547
598971bf
TH
3548 /*
3549 * Check whether blkcg has changed. The condition may trigger
3550 * spuriously on a newly created cic but there's no harm.
3551 */
3552 if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
3553 return;
24610333 3554
598971bf 3555 sync_cfqq = cic_to_cfqq(cic, 1);
24610333
VG
3556 if (sync_cfqq) {
3557 /*
3558 * Drop reference to sync queue. A new sync queue will be
3559 * assigned in new group upon arrival of a fresh request.
3560 */
3561 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
3562 cic_set_cfqq(cic, NULL, 1);
3563 cfq_put_queue(sync_cfqq);
3564 }
598971bf
TH
3565
3566 cic->blkcg_id = id;
24610333 3567}
598971bf
TH
3568#else
3569static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
24610333
VG
3570#endif /* CONFIG_CFQ_GROUP_IOSCHED */
3571
22e2c507 3572static struct cfq_queue *
abede6da
TH
3573cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
3574 struct bio *bio, gfp_t gfp_mask)
22e2c507 3575{
3c798398 3576 struct blkcg *blkcg;
22e2c507 3577 struct cfq_queue *cfqq, *new_cfqq = NULL;
cdb16e8f 3578 struct cfq_group *cfqg;
22e2c507
JA
3579
3580retry:
2a7f1244
TH
3581 rcu_read_lock();
3582
3c798398 3583 blkcg = bio_blkcg(bio);
cd1604fa 3584 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
91fac317 3585 cfqq = cic_to_cfqq(cic, is_sync);
22e2c507 3586
6118b70b
JA
3587 /*
3588 * Always try a new alloc if we fell back to the OOM cfqq
3589 * originally, since it should just be a temporary situation.
3590 */
3591 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3592 cfqq = NULL;
22e2c507
JA
3593 if (new_cfqq) {
3594 cfqq = new_cfqq;
3595 new_cfqq = NULL;
3596 } else if (gfp_mask & __GFP_WAIT) {
2a7f1244 3597 rcu_read_unlock();
22e2c507 3598 spin_unlock_irq(cfqd->queue->queue_lock);
94f6030c 3599 new_cfqq = kmem_cache_alloc_node(cfq_pool,
6118b70b 3600 gfp_mask | __GFP_ZERO,
94f6030c 3601 cfqd->queue->node);
22e2c507 3602 spin_lock_irq(cfqd->queue->queue_lock);
6118b70b
JA
3603 if (new_cfqq)
3604 goto retry;
a3cc86c2
GC
3605 else
3606 return &cfqd->oom_cfqq;
22e2c507 3607 } else {
94f6030c
CL
3608 cfqq = kmem_cache_alloc_node(cfq_pool,
3609 gfp_mask | __GFP_ZERO,
3610 cfqd->queue->node);
22e2c507
JA
3611 }
3612
6118b70b
JA
3613 if (cfqq) {
3614 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
abede6da 3615 cfq_init_prio_data(cfqq, cic);
cdb16e8f 3616 cfq_link_cfqq_cfqg(cfqq, cfqg);
6118b70b
JA
3617 cfq_log_cfqq(cfqd, cfqq, "alloced");
3618 } else
3619 cfqq = &cfqd->oom_cfqq;
22e2c507
JA
3620 }
3621
3622 if (new_cfqq)
3623 kmem_cache_free(cfq_pool, new_cfqq);
3624
2a7f1244 3625 rcu_read_unlock();
22e2c507
JA
3626 return cfqq;
3627}
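
/*
 * Illustrative sketch (not part of the kernel source): the drop-lock/
 * allocate/relock/retry idiom used above when a blocking allocation is
 * needed while a spinlock is held. A pthread mutex and a cached pointer are
 * invented stand-ins; the caller is assumed to hold 'lock' on entry and exit.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item { int v; } *cached;	/* may change while unlocked */

static struct item *get_item(void)
{
	struct item *spare = NULL;

retry:
	if (cached) {			/* someone beat us while unlocked */
		free(spare);		/* free(NULL) is a no-op */
		return cached;
	}
	if (spare) {
		cached = spare;		/* our allocation wins */
		return cached;
	}
	pthread_mutex_unlock(&lock);	/* never block holding the lock */
	spare = malloc(sizeof(*spare));
	pthread_mutex_lock(&lock);
	if (!spare)
		return NULL;		/* allocation failed; still locked */
	goto retry;			/* state may have changed: recheck */
}

int main(void)
{
	pthread_mutex_lock(&lock);
	struct item *it = get_item();
	pthread_mutex_unlock(&lock);
	free(it);
	return !it;
}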
3628
c2dea2d1
VT
3629static struct cfq_queue **
3630cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
3631{
fe094d98 3632 switch (ioprio_class) {
c2dea2d1
VT
3633 case IOPRIO_CLASS_RT:
3634 return &cfqd->async_cfqq[0][ioprio];
598971bf
TH
3635 case IOPRIO_CLASS_NONE:
3636 ioprio = IOPRIO_NORM;
3637 /* fall through */
c2dea2d1
VT
3638 case IOPRIO_CLASS_BE:
3639 return &cfqd->async_cfqq[1][ioprio];
3640 case IOPRIO_CLASS_IDLE:
3641 return &cfqd->async_idle_cfqq;
3642 default:
3643 BUG();
3644 }
3645}
3646
15c31be4 3647static struct cfq_queue *
abede6da 3648cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
4f85cb96 3649 struct bio *bio, gfp_t gfp_mask)
15c31be4 3650{
598971bf
TH
3651 const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3652 const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
c2dea2d1 3653 struct cfq_queue **async_cfqq = NULL;
15c31be4
JA
3654 struct cfq_queue *cfqq = NULL;
3655
c2dea2d1
VT
3656 if (!is_sync) {
3657 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
3658 cfqq = *async_cfqq;
3659 }
3660
6118b70b 3661 if (!cfqq)
abede6da 3662 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);
15c31be4
JA
3663
3664 /*
3665 * pin the queue now that it's allocated, scheduler exit will prune it
3666 */
c2dea2d1 3667 if (!is_sync && !(*async_cfqq)) {
30d7b944 3668 cfqq->ref++;
c2dea2d1 3669 *async_cfqq = cfqq;
15c31be4
JA
3670 }
3671
30d7b944 3672 cfqq->ref++;
15c31be4
JA
3673 return cfqq;
3674}
3675
22e2c507 3676static void
383cd721 3677__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
1da177e4 3678{
383cd721
SL
3679 unsigned long elapsed = jiffies - ttime->last_end_request;
3680 elapsed = min(elapsed, 2UL * slice_idle);
db3b5848 3681
383cd721
SL
3682 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3683 ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3684 ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3685}
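
/*
 * Illustrative sketch (not part of the kernel source): the 7/8-weighted
 * moving average above, kept in fixed point. Samples are scaled by 256 so
 * the mean keeps fractional precision; +128 rounds the final division.
 * The struct and harness are invented.
 */
#include <stdio.h>

struct ewma { unsigned long samples, total; };

static unsigned long ewma_add(struct ewma *e, unsigned long value)
{
	e->samples = (7 * e->samples + 256) / 8;	/* sample weight */
	e->total = (7 * e->total + 256 * value) / 8;	/* scaled sum */
	return (e->total + 128) / e->samples;		/* rounded mean */
}

int main(void)
{
	struct ewma e = { 0, 0 };

	for (int i = 0; i < 5; i++)
		printf("mean after %d ticks: %lu\n", i + 1, ewma_add(&e, 8));
	return 0;	/* converges toward 8 as samples accumulate */
}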
3686
3687static void
3688cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 3689 struct cfq_io_cq *cic)
383cd721 3690{
f5f2b6ce 3691 if (cfq_cfqq_sync(cfqq)) {
383cd721 3692 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
f5f2b6ce
SL
3693 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3694 cfqd->cfq_slice_idle);
3695 }
7700fc4f
SL
3696#ifdef CONFIG_CFQ_GROUP_IOSCHED
3697 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3698#endif
22e2c507 3699}
1da177e4 3700
206dc69b 3701static void
b2c18e1e 3702cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
6d048f53 3703 struct request *rq)
206dc69b 3704{
3dde36dd 3705 sector_t sdist = 0;
41647e7a 3706 sector_t n_sec = blk_rq_sectors(rq);
3dde36dd
CZ
3707 if (cfqq->last_request_pos) {
3708 if (cfqq->last_request_pos < blk_rq_pos(rq))
3709 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3710 else
3711 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3712 }
206dc69b 3713
3dde36dd 3714 cfqq->seek_history <<= 1;
41647e7a
CZ
3715 if (blk_queue_nonrot(cfqd->queue))
3716 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3717 else
3718 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
206dc69b 3719}
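
/*
 * Illustrative sketch (not part of the kernel source): the 32-slot seek
 * history used above. Each request shifts the bitmap and records whether it
 * was "seeky"; the queue counts as seeky once more than 1/8 of the last 32
 * requests were (hweight32(history) > 32/8 in the kernel). Names invented;
 * __builtin_popcount stands in for hweight32().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SEEK_THR (8 * 100)	/* sectors, mirrors CFQQ_SEEK_THR */

struct seek_state { uint32_t history; uint64_t last_pos; };

static bool track_request(struct seek_state *s, uint64_t pos)
{
	uint64_t sdist = pos > s->last_pos ? pos - s->last_pos
					   : s->last_pos - pos;

	s->history <<= 1;
	s->history |= (s->last_pos && sdist > SEEK_THR);
	s->last_pos = pos;
	return __builtin_popcount(s->history) > 32 / 8;
}

int main(void)
{
	struct seek_state s = { 0, 0 };
	uint64_t pos = 0;
	bool seeky = false;

	for (int i = 0; i < 12; i++) {	/* alternate far jumps and reads */
		pos += (i & 1) ? 8 : 1000000;
		seeky = track_request(&s, pos);
	}
	printf("seeky=%d\n", seeky);	/* prints seeky=1 */
	return 0;
}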
1da177e4 3720
22e2c507
JA
3721/*
3722 * Disable idle window if the process thinks too long or seeks so much that
3723 * it doesn't matter
3724 */
3725static void
3726cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 3727 struct cfq_io_cq *cic)
22e2c507 3728{
7b679138 3729 int old_idle, enable_idle;
1be92f2f 3730
0871714e
JA
3731 /*
3732 * Don't idle for async or idle io prio class
3733 */
3734 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1be92f2f
JA
3735 return;
3736
c265a7f4 3737 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1da177e4 3738
76280aff
CZ
3739 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3740 cfq_mark_cfqq_deep(cfqq);
3741
749ef9f8
CZ
3742 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3743 enable_idle = 0;
f6e8d01b 3744 else if (!atomic_read(&cic->icq.ioc->active_ref) ||
c5869807
TH
3745 !cfqd->cfq_slice_idle ||
3746 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
22e2c507 3747 enable_idle = 0;
383cd721
SL
3748 else if (sample_valid(cic->ttime.ttime_samples)) {
3749 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
22e2c507
JA
3750 enable_idle = 0;
3751 else
3752 enable_idle = 1;
1da177e4
LT
3753 }
3754
7b679138
JA
3755 if (old_idle != enable_idle) {
3756 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3757 if (enable_idle)
3758 cfq_mark_cfqq_idle_window(cfqq);
3759 else
3760 cfq_clear_cfqq_idle_window(cfqq);
3761 }
22e2c507 3762}
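
/*
 * Illustrative sketch (not part of the kernel source): the idle-window
 * decision above reduced to a pure function. The flag names are invented
 * mirrors of the checks in cfq_update_idle_window().
 */
#include <stdbool.h>

static bool should_idle(bool noidle_bio, bool has_io_context,
			unsigned long slice_idle, bool deep, bool seeky,
			bool have_ttime, unsigned long ttime_mean,
			bool was_idling)
{
	if (noidle_bio)
		return false;		/* REQ_NOIDLE asks us not to wait */
	if (!has_io_context || !slice_idle || (!deep && seeky))
		return false;		/* idling cannot pay off here */
	if (have_ttime)
		return ttime_mean <= slice_idle; /* thinks fast enough? */
	return was_idling;		/* no data: keep previous choice */
}

int main(void)
{
	/* fast thinker on a deep queue: keep idling enabled */
	return should_idle(false, true, 8, true, true, true, 2, false) ? 0 : 1;
}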
1da177e4 3763
22e2c507
JA
3764/*
3765 * Check if new_cfqq should preempt the currently active queue. Return false
3766 * for no (or if we aren't sure); true will cause a preempt.
3767 */
a6151c3a 3768static bool
22e2c507 3769cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
5e705374 3770 struct request *rq)
22e2c507 3771{
6d048f53 3772 struct cfq_queue *cfqq;
22e2c507 3773
6d048f53
JA
3774 cfqq = cfqd->active_queue;
3775 if (!cfqq)
a6151c3a 3776 return false;
22e2c507 3777
6d048f53 3778 if (cfq_class_idle(new_cfqq))
a6151c3a 3779 return false;
22e2c507
JA
3780
3781 if (cfq_class_idle(cfqq))
a6151c3a 3782 return true;
1e3335de 3783
875feb63
DS
3784 /*
3785 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3786 */
3787 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3788 return false;
3789
374f84ac
JA
3790 /*
3791 * if the new request is sync, but the currently running queue is
3792 * not, let the sync request have priority.
3793 */
5e705374 3794 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
a6151c3a 3795 return true;
1e3335de 3796
8682e1f1
VG
3797 if (new_cfqq->cfqg != cfqq->cfqg)
3798 return false;
3799
3800 if (cfq_slice_used(cfqq))
3801 return true;
3802
3803 /* Allow preemption only if we are idling on sync-noidle tree */
4d2ceea4 3804 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
8682e1f1
VG
3805 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3806 new_cfqq->service_tree->count == 2 &&
3807 RB_EMPTY_ROOT(&cfqq->sort_list))
3808 return true;
3809
b53d1ed7
JA
3810 /*
3811 * So both queues are sync. Let the new request get disk time if
3812 * it's a metadata request and the current queue is doing regular IO.
3813 */
65299a3b 3814 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
b53d1ed7
JA
3815 return true;
3816
3a9a3f6c
DS
3817 /*
3818	 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
3819 */
3820 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
a6151c3a 3821 return true;
3a9a3f6c 3822
d2d59e18
SL
3823 /* An idle queue should not be idle now for some reason */
3824 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3825 return true;
3826
1e3335de 3827 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
a6151c3a 3828 return false;
1e3335de
JA
3829
3830 /*
3831 * if this request is as-good as one we would expect from the
3832 * current cfqq, let it preempt
3833 */
e9ce335d 3834 if (cfq_rq_close(cfqd, cfqq, rq))
a6151c3a 3835 return true;
1e3335de 3836
a6151c3a 3837 return false;
22e2c507
JA
3838}
3839
3840/*
3841 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3842 * let it have half of its nominal slice.
3843 */
3844static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3845{
df0793ab
SL
3846 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
3847
7b679138 3848 cfq_log_cfqq(cfqd, cfqq, "preempt");
df0793ab 3849 cfq_slice_expired(cfqd, 1);
22e2c507 3850
f8ae6e3e
SL
3851 /*
3852 * workload type is changed, don't save slice, otherwise preempt
3853 * doesn't happen
3854 */
df0793ab 3855 if (old_type != cfqq_type(cfqq))
4d2ceea4 3856 cfqq->cfqg->saved_wl_slice = 0;
f8ae6e3e 3857
bf572256
JA
3858 /*
3859	 * Put the new queue at the front of the current list,
3860 * so we know that it will be selected next.
3861 */
3862 BUG_ON(!cfq_cfqq_on_rr(cfqq));
edd75ffd
JA
3863
3864 cfq_service_tree_add(cfqd, cfqq, 1);
eda5e0c9 3865
62a37f6b
JT
3866 cfqq->slice_end = 0;
3867 cfq_mark_cfqq_slice_new(cfqq);
22e2c507
JA
3868}
3869
22e2c507 3870/*
5e705374 3871 * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507
JA
3872 * something we should do about it
3873 */
3874static void
5e705374
JA
3875cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3876 struct request *rq)
22e2c507 3877{
c5869807 3878 struct cfq_io_cq *cic = RQ_CIC(rq);
12e9fddd 3879
45333d5a 3880 cfqd->rq_queued++;
65299a3b
CH
3881 if (rq->cmd_flags & REQ_PRIO)
3882 cfqq->prio_pending++;
374f84ac 3883
383cd721 3884 cfq_update_io_thinktime(cfqd, cfqq, cic);
b2c18e1e 3885 cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a1
JA
3886 cfq_update_idle_window(cfqd, cfqq, cic);
3887
b2c18e1e 3888 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507
JA
3889
3890 if (cfqq == cfqd->active_queue) {
3891 /*
b029195d
JA
3892 * Remember that we saw a request from this process, but
3893 * don't start queuing just yet. Otherwise we risk seeing lots
3894 * of tiny requests, because we disrupt the normal plugging
d6ceb25e
JA
3895 * and merging. If the request is already larger than a single
3896 * page, let it rip immediately. For that case we assume that
2d870722
JA
3897 * merging is already done. Ditto for a busy system that
3898 * has other work pending, don't risk delaying until the
3899 * idle timer unplug to continue working.
22e2c507 3900 */
d6ceb25e 3901 if (cfq_cfqq_wait_request(cfqq)) {
2d870722
JA
3902 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3903 cfqd->busy_queues > 1) {
812df48d 3904 cfq_del_timer(cfqd, cfqq);
554554f6 3905 cfq_clear_cfqq_wait_request(cfqq);
24ecfbe2 3906 __blk_run_queue(cfqd->queue);
a11cdaa7 3907 } else {
155fead9 3908 cfqg_stats_update_idle_time(cfqq->cfqg);
bf791937 3909 cfq_mark_cfqq_must_dispatch(cfqq);
a11cdaa7 3910 }
d6ceb25e 3911 }
5e705374 3912 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507
JA
3913 /*
3914	 * not the active queue - expire the current slice if it is
3915	 * idle and has exceeded its mean thinktime, or if this new queue
3916	 * has some old slice time left and is of higher priority, or if
3917	 * this new queue is RT and the current one is BE
22e2c507
JA
3918 */
3919 cfq_preempt_queue(cfqd, cfqq);
24ecfbe2 3920 __blk_run_queue(cfqd->queue);
22e2c507 3921 }
1da177e4
LT
3922}
3923
165125e1 3924static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4 3925{
b4878f24 3926 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 3927 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 3928
7b679138 3929 cfq_log_cfqq(cfqd, cfqq, "insert_request");
abede6da 3930 cfq_init_prio_data(cfqq, RQ_CIC(rq));
1da177e4 3931
8b4922d3 3932 rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
22e2c507 3933 list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3d 3934 cfq_add_rq_rb(rq);
155fead9
TH
3935 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
3936 rq->cmd_flags);
5e705374 3937 cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4
LT
3938}
3939
45333d5a
AC
3940/*
3941 * Update hw_tag based on peak queue depth over 50 samples under
3942 * sufficient load.
3943 */
3944static void cfq_update_hw_tag(struct cfq_data *cfqd)
3945{
1a1238a7
SL
3946 struct cfq_queue *cfqq = cfqd->active_queue;
3947
53c583d2
CZ
3948 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3949 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
e459dd08
CZ
3950
3951 if (cfqd->hw_tag == 1)
3952 return;
45333d5a
AC
3953
3954 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
53c583d2 3955 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
45333d5a
AC
3956 return;
3957
1a1238a7
SL
3958 /*
3959	 * If the active queue doesn't have enough requests and can idle, cfq might not
3960 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3961 * case
3962 */
3963 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3964 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
53c583d2 3965 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
1a1238a7
SL
3966 return;
3967
45333d5a
AC
3968 if (cfqd->hw_tag_samples++ < 50)
3969 return;
3970
e459dd08 3971 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
45333d5a
AC
3972 cfqd->hw_tag = 1;
3973 else
3974 cfqd->hw_tag = 0;
45333d5a
AC
3975}
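
/*
 * Illustrative sketch (not part of the kernel source): deciding whether a
 * device queues internally by watching the peak number of in-flight requests
 * over 50 samples, as cfq_update_hw_tag() does above (CFQ_HW_QUEUE_MIN == 5).
 * Simplified to a one-shot decision; the struct and names are invented.
 */
struct tag_est {
	unsigned int peak, samples;
	int hw_tag;			/* -1 = not decided yet */
};

static void tag_sample(struct tag_est *t, unsigned int in_flight)
{
	if (in_flight > t->peak)
		t->peak = in_flight;
	if (t->hw_tag != -1 || ++t->samples < 50)
		return;			/* decided, or not enough data */
	t->hw_tag = t->peak >= 5;	/* ever drove a deep queue? */
}

int main(void)
{
	struct tag_est t = { 0, 0, -1 };

	for (int i = 0; i < 50; i++)
		tag_sample(&t, i < 10 ? 9 : 2);	/* early burst hit depth 9 */
	return t.hw_tag == 1 ? 0 : 1;
}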
3976
7667aa06
VG
3977static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3978{
c5869807 3979 struct cfq_io_cq *cic = cfqd->active_cic;
7667aa06 3980
02a8f01b
JT
3981 /* If the queue already has requests, don't wait */
3982 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3983 return false;
3984
7667aa06
VG
3985 /* If there are other queues in the group, don't wait */
3986 if (cfqq->cfqg->nr_cfqq > 1)
3987 return false;
3988
7700fc4f
SL
3989 /* the only queue in the group, but think time is big */
3990 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3991 return false;
3992
7667aa06
VG
3993 if (cfq_slice_used(cfqq))
3994 return true;
3995
3996 /* if slice left is less than think time, wait busy */
383cd721
SL
3997 if (cic && sample_valid(cic->ttime.ttime_samples)
3998 && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
7667aa06
VG
3999 return true;
4000
4001 /*
4002	 * If the think time is less than a jiffy, then ttime_mean=0 and the
4003	 * above will not be true. It might happen that the slice has not expired
4004	 * yet but will expire soon (4-5 ns) during select_queue(). To cover the
4005	 * case where think time is less than a jiffy, mark the queue wait
4006 * busy if only 1 jiffy is left in the slice.
4007 */
4008 if (cfqq->slice_end - jiffies == 1)
4009 return true;
4010
4011 return false;
4012}
4013
165125e1 4014static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4 4015{
5e705374 4016 struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f24 4017 struct cfq_data *cfqd = cfqq->cfqd;
5380a101 4018 const int sync = rq_is_sync(rq);
b4878f24 4019 unsigned long now;
1da177e4 4020
b4878f24 4021 now = jiffies;
33659ebb
CH
4022 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
4023 !!(rq->cmd_flags & REQ_NOIDLE));
1da177e4 4024
45333d5a
AC
4025 cfq_update_hw_tag(cfqd);
4026
53c583d2 4027 WARN_ON(!cfqd->rq_in_driver);
6d048f53 4028 WARN_ON(!cfqq->dispatched);
53c583d2 4029 cfqd->rq_in_driver--;
6d048f53 4030 cfqq->dispatched--;
80bdf0c7 4031 (RQ_CFQG(rq))->dispatched--;
155fead9
TH
4032 cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
4033 rq_io_start_time_ns(rq), rq->cmd_flags);
1da177e4 4034
53c583d2 4035 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3ed9a296 4036
365722bb 4037 if (sync) {
34b98d03 4038 struct cfq_rb_root *st;
f5f2b6ce 4039
383cd721 4040 RQ_CIC(rq)->ttime.last_end_request = now;
f5f2b6ce
SL
4041
4042 if (cfq_cfqq_on_rr(cfqq))
34b98d03 4043 st = cfqq->service_tree;
f5f2b6ce 4044 else
34b98d03
VG
4045 st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4046 cfqq_type(cfqq));
4047
4048 st->ttime.last_end_request = now;
573412b2
CZ
4049 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
4050 cfqd->last_delayed_sync = now;
365722bb 4051 }
caaa5f9f 4052
7700fc4f
SL
4053#ifdef CONFIG_CFQ_GROUP_IOSCHED
4054 cfqq->cfqg->ttime.last_end_request = now;
4055#endif
4056
caaa5f9f
JA
4057 /*
4058 * If this is the active queue, check if it needs to be expired,
4059 * or if we want to idle in case it has no pending requests.
4060 */
4061 if (cfqd->active_queue == cfqq) {
a36e71f9
JA
4062 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4063
44f7c160
JA
4064 if (cfq_cfqq_slice_new(cfqq)) {
4065 cfq_set_prio_slice(cfqd, cfqq);
4066 cfq_clear_cfqq_slice_new(cfqq);
4067 }
f75edf2d
VG
4068
4069 /*
7667aa06
VG
4070	 * Should we wait for the next request to come in before we expire
4071	 * the queue?
f75edf2d 4072 */
7667aa06 4073 if (cfq_should_wait_busy(cfqd, cfqq)) {
80bdf0c7
VG
4074 unsigned long extend_sl = cfqd->cfq_slice_idle;
4075 if (!cfqd->cfq_slice_idle)
4076 extend_sl = cfqd->cfq_group_idle;
4077 cfqq->slice_end = jiffies + extend_sl;
f75edf2d 4078 cfq_mark_cfqq_wait_busy(cfqq);
b1ffe737 4079 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
f75edf2d
VG
4080 }
4081
a36e71f9 4082 /*
8e550632
CZ
4083 * Idling is not enabled on:
4084 * - expired queues
4085 * - idle-priority queues
4086 * - async queues
4087 * - queues with still some requests queued
4088 * - when there is a close cooperator
a36e71f9 4089 */
0871714e 4090 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
e5ff082e 4091 cfq_slice_expired(cfqd, 1);
8e550632
CZ
4092 else if (sync && cfqq_empty &&
4093 !cfq_close_cooperator(cfqd, cfqq)) {
749ef9f8 4094 cfq_arm_slice_timer(cfqd);
8e550632 4095 }
caaa5f9f 4096 }
6d048f53 4097
53c583d2 4098 if (!cfqd->rq_in_driver)
23e018a1 4099 cfq_schedule_dispatch(cfqd);
1da177e4
LT
4100}
4101
89850f7e 4102static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507 4103{
1b379d8d 4104 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c 4105 cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507 4106 return ELV_MQUEUE_MUST;
3b18152c 4107 }
1da177e4 4108
22e2c507 4109 return ELV_MQUEUE_MAY;
22e2c507
JA
4110}
4111
165125e1 4112static int cfq_may_queue(struct request_queue *q, int rw)
22e2c507
JA
4113{
4114 struct cfq_data *cfqd = q->elevator->elevator_data;
4115 struct task_struct *tsk = current;
c5869807 4116 struct cfq_io_cq *cic;
22e2c507
JA
4117 struct cfq_queue *cfqq;
4118
4119 /*
4120 * don't force setup of a queue from here, as a call to may_queue
4121 * does not necessarily imply that a request actually will be queued.
4122	 * so just look up a possibly existing queue, or return 'may queue'
4123 * if that fails
4124 */
4ac845a2 4125 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
4126 if (!cic)
4127 return ELV_MQUEUE_MAY;
4128
b0b78f81 4129 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
22e2c507 4130 if (cfqq) {
abede6da 4131 cfq_init_prio_data(cfqq, cic);
22e2c507 4132
89850f7e 4133 return __cfq_may_queue(cfqq);
22e2c507
JA
4134 }
4135
4136 return ELV_MQUEUE_MAY;
1da177e4
LT
4137}
4138
1da177e4
LT
4139/*
4140 * queue lock held here
4141 */
bb37b94c 4142static void cfq_put_request(struct request *rq)
1da177e4 4143{
5e705374 4144 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 4145
5e705374 4146 if (cfqq) {
22e2c507 4147 const int rw = rq_data_dir(rq);
1da177e4 4148
22e2c507
JA
4149 BUG_ON(!cfqq->allocated[rw]);
4150 cfqq->allocated[rw]--;
1da177e4 4151
7f1dc8a2 4152 /* Put down rq reference on cfqg */
eb7d8c07 4153 cfqg_put(RQ_CFQG(rq));
a612fddf
TH
4154 rq->elv.priv[0] = NULL;
4155 rq->elv.priv[1] = NULL;
7f1dc8a2 4156
1da177e4
LT
4157 cfq_put_queue(cfqq);
4158 }
4159}
4160
df5fe3e8 4161static struct cfq_queue *
c5869807 4162cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
df5fe3e8
JM
4163 struct cfq_queue *cfqq)
4164{
4165 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4166 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d040 4167 cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8
JM
4168 cfq_put_queue(cfqq);
4169 return cic_to_cfqq(cic, 1);
4170}
4171
e6c5bc73
JM
4172/*
4173 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4174 * was the last process referring to said cfqq.
4175 */
4176static struct cfq_queue *
c5869807 4177split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
e6c5bc73
JM
4178{
4179 if (cfqq_process_refs(cfqq) == 1) {
e6c5bc73
JM
4180 cfqq->pid = current->pid;
4181 cfq_clear_cfqq_coop(cfqq);
ae54abed 4182 cfq_clear_cfqq_split_coop(cfqq);
e6c5bc73
JM
4183 return cfqq;
4184 }
4185
4186 cic_set_cfqq(cic, NULL, 1);
d02a2c07
SL
4187
4188 cfq_put_cooperator(cfqq);
4189
e6c5bc73
JM
4190 cfq_put_queue(cfqq);
4191 return NULL;
4192}
1da177e4 4193/*
22e2c507 4194 * Allocate cfq data structures associated with this request.
1da177e4 4195 */
22e2c507 4196static int
852c788f
TH
4197cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4198 gfp_t gfp_mask)
1da177e4
LT
4199{
4200 struct cfq_data *cfqd = q->elevator->elevator_data;
f1f8cc94 4201 struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
1da177e4 4202 const int rw = rq_data_dir(rq);
a6151c3a 4203 const bool is_sync = rq_is_sync(rq);
22e2c507 4204 struct cfq_queue *cfqq;
1da177e4
LT
4205
4206 might_sleep_if(gfp_mask & __GFP_WAIT);
4207
216284c3 4208 spin_lock_irq(q->queue_lock);
f1f8cc94 4209
598971bf
TH
4210 check_ioprio_changed(cic, bio);
4211 check_blkcg_changed(cic, bio);
e6c5bc73 4212new_queue:
91fac317 4213 cfqq = cic_to_cfqq(cic, is_sync);
32f2e807 4214 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
abede6da 4215 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
91fac317 4216 cic_set_cfqq(cic, cfqq, is_sync);
df5fe3e8 4217 } else {
e6c5bc73
JM
4218 /*
4219 * If the queue was seeky for too long, break it apart.
4220 */
ae54abed 4221 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
e6c5bc73
JM
4222 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4223 cfqq = split_cfqq(cic, cfqq);
4224 if (!cfqq)
4225 goto new_queue;
4226 }
4227
df5fe3e8
JM
4228 /*
4229 * Check to see if this queue is scheduled to merge with
4230 * another, closely cooperating queue. The merging of
4231 * queues happens here as it must be done in process context.
4232 * The reference on new_cfqq was taken in merge_cfqqs.
4233 */
4234 if (cfqq->new_cfqq)
4235 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
91fac317 4236 }
1da177e4
LT
4237
4238 cfqq->allocated[rw]++;
1da177e4 4239
6fae9c25 4240 cfqq->ref++;
eb7d8c07 4241 cfqg_get(cfqq->cfqg);
a612fddf 4242 rq->elv.priv[0] = cfqq;
1adaf3dd 4243 rq->elv.priv[1] = cfqq->cfqg;
216284c3 4244 spin_unlock_irq(q->queue_lock);
5e705374 4245 return 0;
1da177e4
LT
4246}
4247
65f27f38 4248static void cfq_kick_queue(struct work_struct *work)
22e2c507 4249{
65f27f38 4250 struct cfq_data *cfqd =
23e018a1 4251 container_of(work, struct cfq_data, unplug_work);
165125e1 4252 struct request_queue *q = cfqd->queue;
22e2c507 4253
40bb54d1 4254 spin_lock_irq(q->queue_lock);
24ecfbe2 4255 __blk_run_queue(cfqd->queue);
40bb54d1 4256 spin_unlock_irq(q->queue_lock);
22e2c507
JA
4257}
4258
4259/*
4260 * Timer running if the active_queue is currently idling inside its time slice
4261 */
4262static void cfq_idle_slice_timer(unsigned long data)
4263{
4264 struct cfq_data *cfqd = (struct cfq_data *) data;
4265 struct cfq_queue *cfqq;
4266 unsigned long flags;
3c6bd2f8 4267 int timed_out = 1;
22e2c507 4268
7b679138
JA
4269 cfq_log(cfqd, "idle timer fired");
4270
22e2c507
JA
4271 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4272
fe094d98
JA
4273 cfqq = cfqd->active_queue;
4274 if (cfqq) {
3c6bd2f8
JA
4275 timed_out = 0;
4276
b029195d
JA
4277 /*
4278 * We saw a request before the queue expired, let it through
4279 */
4280 if (cfq_cfqq_must_dispatch(cfqq))
4281 goto out_kick;
4282
22e2c507
JA
4283 /*
4284 * expired
4285 */
44f7c160 4286 if (cfq_slice_used(cfqq))
22e2c507
JA
4287 goto expire;
4288
4289 /*
4290 * only expire and reinvoke request handler, if there are
4291 * other queues with pending requests
4292 */
caaa5f9f 4293 if (!cfqd->busy_queues)
22e2c507 4294 goto out_cont;
22e2c507
JA
4295
4296 /*
4297 * not expired and it has a request pending, let it dispatch
4298 */
75e50984 4299 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 4300 goto out_kick;
76280aff
CZ
4301
4302 /*
4303	 * The queue-depth ("deep") flag is reset only when idling didn't succeed
4304 */
4305 cfq_clear_cfqq_deep(cfqq);
22e2c507
JA
4306 }
4307expire:
e5ff082e 4308 cfq_slice_expired(cfqd, timed_out);
22e2c507 4309out_kick:
23e018a1 4310 cfq_schedule_dispatch(cfqd);
22e2c507
JA
4311out_cont:
4312 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
4313}
4314
3b18152c
JA
4315static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4316{
4317 del_timer_sync(&cfqd->idle_slice_timer);
23e018a1 4318 cancel_work_sync(&cfqd->unplug_work);
3b18152c 4319}
22e2c507 4320
c2dea2d1
VT
4321static void cfq_put_async_queues(struct cfq_data *cfqd)
4322{
4323 int i;
4324
4325 for (i = 0; i < IOPRIO_BE_NR; i++) {
4326 if (cfqd->async_cfqq[0][i])
4327 cfq_put_queue(cfqd->async_cfqq[0][i]);
4328 if (cfqd->async_cfqq[1][i])
4329 cfq_put_queue(cfqd->async_cfqq[1][i]);
c2dea2d1 4330 }
2389d1ef
ON
4331
4332 if (cfqd->async_idle_cfqq)
4333 cfq_put_queue(cfqd->async_idle_cfqq);
c2dea2d1
VT
4334}
4335
b374d18a 4336static void cfq_exit_queue(struct elevator_queue *e)
1da177e4 4337{
22e2c507 4338 struct cfq_data *cfqd = e->elevator_data;
165125e1 4339 struct request_queue *q = cfqd->queue;
22e2c507 4340
3b18152c 4341 cfq_shutdown_timer_wq(cfqd);
e2d74ac0 4342
d9ff4187 4343 spin_lock_irq(q->queue_lock);
e2d74ac0 4344
d9ff4187 4345 if (cfqd->active_queue)
e5ff082e 4346 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
e2d74ac0 4347
c2dea2d1 4348 cfq_put_async_queues(cfqd);
03aa264a
TH
4349
4350 spin_unlock_irq(q->queue_lock);
4351
a90d742e
AV
4352 cfq_shutdown_timer_wq(cfqd);
4353
ffea73fc
TH
4354#ifdef CONFIG_CFQ_GROUP_IOSCHED
4355 blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4356#else
f51b802c 4357 kfree(cfqd->root_group);
2abae55f 4358#endif
56edf7d7 4359 kfree(cfqd);
1da177e4
LT
4360}
4361
d50235b7 4362static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
1da177e4
LT
4363{
4364 struct cfq_data *cfqd;
3c798398 4365 struct blkcg_gq *blkg __maybe_unused;
a2b1693b 4366 int i, ret;
d50235b7
JM
4367 struct elevator_queue *eq;
4368
4369 eq = elevator_alloc(q, e);
4370 if (!eq)
4371 return -ENOMEM;
1da177e4 4372
c1b511eb 4373 cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
d50235b7
JM
4374 if (!cfqd) {
4375 kobject_put(&eq->kobj);
b2fab5ac 4376 return -ENOMEM;
d50235b7
JM
4377 }
4378 eq->elevator_data = cfqd;
80b15c73 4379
f51b802c 4380 cfqd->queue = q;
d50235b7
JM
4381 spin_lock_irq(q->queue_lock);
4382 q->elevator = eq;
4383 spin_unlock_irq(q->queue_lock);
f51b802c 4384
1fa8f6d6
VG
4385 /* Init root service tree */
4386 cfqd->grp_service_tree = CFQ_RB_ROOT;
4387
f51b802c 4388 /* Init root group and prefer root group over other groups by default */
25fb5169 4389#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4390 ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
a2b1693b
TH
4391 if (ret)
4392 goto out_free;
f51b802c 4393
a2b1693b 4394 cfqd->root_group = blkg_to_cfqg(q->root_blkg);
f51b802c 4395#else
a2b1693b 4396 ret = -ENOMEM;
f51b802c
TH
4397 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4398 GFP_KERNEL, cfqd->queue->node);
a2b1693b
TH
4399 if (!cfqd->root_group)
4400 goto out_free;
5624a4e4 4401
a2b1693b
TH
4402 cfq_init_cfqg_base(cfqd->root_group);
4403#endif
3381cb8d 4404 cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
e71357e1 4405 cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;
5624a4e4 4406
26a2ac00
JA
4407 /*
4408 * Not strictly needed (since RB_ROOT just clears the node and we
4409 * zeroed cfqd on alloc), but better be safe in case someone decides
4410 * to add magic to the rb code
4411 */
4412 for (i = 0; i < CFQ_PRIO_LISTS; i++)
4413 cfqd->prio_trees[i] = RB_ROOT;
4414
6118b70b
JA
4415 /*
4416 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
4417 * Grab a permanent reference to it, so that the normal code flow
f51b802c
TH
4418 * will not attempt to free it. oom_cfqq is linked to root_group
4419 * but shouldn't hold a reference as it'll never be unlinked. Lose
4420 * the reference from linking right away.
6118b70b
JA
4421 */
4422 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
30d7b944 4423 cfqd->oom_cfqq.ref++;
1adaf3dd
TH
4424
4425 spin_lock_irq(q->queue_lock);
f51b802c 4426 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
eb7d8c07 4427 cfqg_put(cfqd->root_group);
1adaf3dd 4428 spin_unlock_irq(q->queue_lock);
1da177e4 4429
22e2c507
JA
4430 init_timer(&cfqd->idle_slice_timer);
4431 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
4432 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
4433
23e018a1 4434 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
22e2c507 4435
1da177e4 4436 cfqd->cfq_quantum = cfq_quantum;
22e2c507
JA
4437 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4438 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
1da177e4
LT
4439 cfqd->cfq_back_max = cfq_back_max;
4440 cfqd->cfq_back_penalty = cfq_back_penalty;
22e2c507
JA
4441 cfqd->cfq_slice[0] = cfq_slice_async;
4442 cfqd->cfq_slice[1] = cfq_slice_sync;
5bf14c07 4443 cfqd->cfq_target_latency = cfq_target_latency;
22e2c507
JA
4444 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
4445 cfqd->cfq_slice_idle = cfq_slice_idle;
80bdf0c7 4446 cfqd->cfq_group_idle = cfq_group_idle;
963b72fc 4447 cfqd->cfq_latency = 1;
e459dd08 4448 cfqd->hw_tag = -1;
edc71131
CZ
4449 /*
4450	 * we optimistically start assuming sync ops weren't delayed in the last
4451	 * second, in order to have a larger depth for async operations.
4452 */
573412b2 4453 cfqd->last_delayed_sync = jiffies - HZ;
b2fab5ac 4454 return 0;
a2b1693b
TH
4455
4456out_free:
4457 kfree(cfqd);
d50235b7 4458 kobject_put(&eq->kobj);
a2b1693b 4459 return ret;
1da177e4
LT
4460}
4461
1da177e4
LT
4462/*
4463 * sysfs parts below -->
4464 */
1da177e4
LT
4465static ssize_t
4466cfq_var_show(unsigned int var, char *page)
4467{
176167ad 4468 return sprintf(page, "%u\n", var);
1da177e4
LT
4469}
4470
4471static ssize_t
4472cfq_var_store(unsigned int *var, const char *page, size_t count)
4473{
4474 char *p = (char *) page;
4475
4476 *var = simple_strtoul(p, &p, 10);
4477 return count;
4478}
4479
1da177e4 4480#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
b374d18a 4481static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1da177e4 4482{ \
3d1ab40f 4483 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
4484 unsigned int __data = __VAR; \
4485 if (__CONV) \
4486 __data = jiffies_to_msecs(__data); \
4487 return cfq_var_show(__data, (page)); \
4488}
4489SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
22e2c507
JA
4490SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4491SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
e572ec7e
AV
4492SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4493SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
22e2c507 4494SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
80bdf0c7 4495SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
22e2c507
JA
4496SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4497SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4498SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
963b72fc 4499SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
5bf14c07 4500SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
1da177e4
LT
4501#undef SHOW_FUNCTION
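
/*
 * Illustrative sketch (not part of the kernel source): what one
 * SHOW_FUNCTION() instantiation above expands to, written out by hand for
 * the slice_idle tunable. The _expanded name is invented; the jiffies
 * conversion runs because __CONV was 1 for this attribute.
 */
static ssize_t cfq_slice_idle_show_expanded(struct elevator_queue *e,
					    char *page)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data = cfqd->cfq_slice_idle;

	__data = jiffies_to_msecs(__data);	/* __CONV was 1 */
	return cfq_var_show(__data, page);
}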
4502
4503#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
b374d18a 4504static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
1da177e4 4505{ \
3d1ab40f 4506 struct cfq_data *cfqd = e->elevator_data; \
1da177e4
LT
4507 unsigned int __data; \
4508 int ret = cfq_var_store(&__data, (page), count); \
4509 if (__data < (MIN)) \
4510 __data = (MIN); \
4511 else if (__data > (MAX)) \
4512 __data = (MAX); \
4513 if (__CONV) \
4514 *(__PTR) = msecs_to_jiffies(__data); \
4515 else \
4516 *(__PTR) = __data; \
4517 return ret; \
4518}
4519STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
fe094d98
JA
4520STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4521 UINT_MAX, 1);
4522STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4523 UINT_MAX, 1);
e572ec7e 4524STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
fe094d98
JA
4525STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4526 UINT_MAX, 0);
22e2c507 4527STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
80bdf0c7 4528STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
22e2c507
JA
4529STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4530STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
fe094d98
JA
4531STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4532 UINT_MAX, 0);
963b72fc 4533STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
5bf14c07 4534STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
1da177e4
LT
4535#undef STORE_FUNCTION
4536
e572ec7e
AV
4537#define CFQ_ATTR(name) \
4538 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4539
4540static struct elv_fs_entry cfq_attrs[] = {
4541 CFQ_ATTR(quantum),
e572ec7e
AV
4542 CFQ_ATTR(fifo_expire_sync),
4543 CFQ_ATTR(fifo_expire_async),
4544 CFQ_ATTR(back_seek_max),
4545 CFQ_ATTR(back_seek_penalty),
4546 CFQ_ATTR(slice_sync),
4547 CFQ_ATTR(slice_async),
4548 CFQ_ATTR(slice_async_rq),
4549 CFQ_ATTR(slice_idle),
80bdf0c7 4550 CFQ_ATTR(group_idle),
963b72fc 4551 CFQ_ATTR(low_latency),
5bf14c07 4552 CFQ_ATTR(target_latency),
e572ec7e 4553 __ATTR_NULL
1da177e4
LT
4554};
4555
1da177e4
LT
4556static struct elevator_type iosched_cfq = {
4557 .ops = {
4558 .elevator_merge_fn = cfq_merge,
4559 .elevator_merged_fn = cfq_merged_request,
4560 .elevator_merge_req_fn = cfq_merged_requests,
da775265 4561 .elevator_allow_merge_fn = cfq_allow_merge,
812d4026 4562 .elevator_bio_merged_fn = cfq_bio_merged,
b4878f24 4563 .elevator_dispatch_fn = cfq_dispatch_requests,
1da177e4 4564 .elevator_add_req_fn = cfq_insert_request,
b4878f24 4565 .elevator_activate_req_fn = cfq_activate_request,
1da177e4 4566 .elevator_deactivate_req_fn = cfq_deactivate_request,
1da177e4 4567 .elevator_completed_req_fn = cfq_completed_request,
21183b07
JA
4568 .elevator_former_req_fn = elv_rb_former_request,
4569 .elevator_latter_req_fn = elv_rb_latter_request,
9b84cacd 4570 .elevator_init_icq_fn = cfq_init_icq,
7e5a8794 4571 .elevator_exit_icq_fn = cfq_exit_icq,
1da177e4
LT
4572 .elevator_set_req_fn = cfq_set_request,
4573 .elevator_put_req_fn = cfq_put_request,
4574 .elevator_may_queue_fn = cfq_may_queue,
4575 .elevator_init_fn = cfq_init_queue,
4576 .elevator_exit_fn = cfq_exit_queue,
4577 },
3d3c2379
TH
4578 .icq_size = sizeof(struct cfq_io_cq),
4579 .icq_align = __alignof__(struct cfq_io_cq),
3d1ab40f 4580 .elevator_attrs = cfq_attrs,
3d3c2379 4581 .elevator_name = "cfq",
1da177e4
LT
4582 .elevator_owner = THIS_MODULE,
4583};
4584
3e252066 4585#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4586static struct blkcg_policy blkcg_policy_cfq = {
f9fcc2d3
TH
4587 .pd_size = sizeof(struct cfq_group),
4588 .cftypes = cfq_blkcg_files,
4589
4590 .pd_init_fn = cfq_pd_init,
0b39920b 4591 .pd_offline_fn = cfq_pd_offline,
f9fcc2d3 4592 .pd_reset_stats_fn = cfq_pd_reset_stats,
3e252066 4593};
3e252066
VG
4594#endif
4595
1da177e4
LT
4596static int __init cfq_init(void)
4597{
3d3c2379
TH
4598 int ret;
4599
22e2c507
JA
4600 /*
4601 * could be 0 on HZ < 1000 setups
4602 */
4603 if (!cfq_slice_async)
4604 cfq_slice_async = 1;
4605 if (!cfq_slice_idle)
4606 cfq_slice_idle = 1;
4607
80bdf0c7
VG
4608#ifdef CONFIG_CFQ_GROUP_IOSCHED
4609 if (!cfq_group_idle)
4610 cfq_group_idle = 1;
8bd435b3 4611
3c798398 4612 ret = blkcg_policy_register(&blkcg_policy_cfq);
8bd435b3
TH
4613 if (ret)
4614 return ret;
ffea73fc
TH
4615#else
4616 cfq_group_idle = 0;
4617#endif
8bd435b3 4618
fd794956 4619 ret = -ENOMEM;
3d3c2379
TH
4620 cfq_pool = KMEM_CACHE(cfq_queue, 0);
4621 if (!cfq_pool)
8bd435b3 4622 goto err_pol_unreg;
1da177e4 4623
3d3c2379 4624 ret = elv_register(&iosched_cfq);
8bd435b3
TH
4625 if (ret)
4626 goto err_free_pool;
3d3c2379 4627
2fdd82bd 4628 return 0;
8bd435b3
TH
4629
4630err_free_pool:
4631 kmem_cache_destroy(cfq_pool);
4632err_pol_unreg:
ffea73fc 4633#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4634 blkcg_policy_unregister(&blkcg_policy_cfq);
ffea73fc 4635#endif
8bd435b3 4636 return ret;
1da177e4
LT
4637}
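
/*
 * Illustrative sketch (not part of the kernel source): the error-unwind
 * pattern cfq_init() uses above, in miniature. Each acquired resource gets
 * an undo step; a later failure jumps past its own undo and releases only
 * what was actually taken, in reverse order. All names here are invented.
 */
#include <stdlib.h>

static void *acquire(void)
{
	return malloc(1);	/* stand-in for policy/cache/elevator setup */
}

static int init_two(void **pa, void **pb)
{
	*pa = acquire();
	if (!*pa)
		return -1;	/* nothing to undo yet */
	*pb = acquire();
	if (!*pb)
		goto err_free_a;
	return 0;

err_free_a:
	free(*pa);		/* undo in reverse order of acquisition */
	*pa = NULL;
	return -1;
}

int main(void)
{
	void *a, *b;

	if (init_two(&a, &b))
		return 1;
	free(b);		/* normal teardown also runs in reverse */
	free(a);
	return 0;
}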
4638
4639static void __exit cfq_exit(void)
4640{
ffea73fc 4641#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4642 blkcg_policy_unregister(&blkcg_policy_cfq);
ffea73fc 4643#endif
1da177e4 4644 elv_unregister(&iosched_cfq);
3d3c2379 4645 kmem_cache_destroy(cfq_pool);
1da177e4
LT
4646}
4647
4648module_init(cfq_init);
4649module_exit(cfq_exit);
4650
4651MODULE_AUTHOR("Jens Axboe");
4652MODULE_LICENSE("GPL");
4653MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");