/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include <linux/blk-cgroup.h>
#include "blk.h"
#include "blk-wbt.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
static u64 cfq_slice_async = NSEC_PER_SEC / 25;
static const int cfq_slice_async_rq = 2;
static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
static u64 cfq_group_idle = NSEC_PER_SEC / 125;
static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of queue service tree for idle class
 */
#define CFQ_IDLE_DELAY		(NSEC_PER_SEC / 5)
/* offset from end of group service tree under time slice mode */
#define CFQ_SLICE_MODE_GROUP_DELAY	(NSEC_PER_SEC / 5)
/* offset from end of group service under IOPS mode */
#define CFQ_IOPS_MODE_GROUP_DELAY	(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2 * NSEC_PER_SEC / HZ)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
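
/*
 * Reading guide (an assumption based on how seek_history is filled in
 * later in this file): each request shifts one bit into seek_history,
 * set when the request landed further than the seek threshold from the
 * previous request's position, so CFQQ_SEEKY() marks a queue once more
 * than 4 of its last 32 sampled requests were such long seeks.
 */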

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

/* blkio-related constants */
#define CFQ_WEIGHT_LEGACY_MIN	10
#define CFQ_WEIGHT_LEGACY_DFL	500
#define CFQ_WEIGHT_LEGACY_MAX	1000

struct cfq_ttime {
	u64 last_end_request;

	u64 ttime_total;
	u64 ttime_mean;
	unsigned long ttime_samples;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root_cached rb;
	struct rb_node *rb_rightmost;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT_CACHED, \
			.rb_rightmost = NULL,			     \
			.ttime = {.last_end_request = ktime_get_ns(),},}
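
/*
 * Illustration: with RB_ROOT_CACHED the rbtree core caches the leftmost
 * (minimum key) node, so looking up the next entity to serve via
 * rb_first_cached(&st->rb) is O(1); rb_rightmost is maintained by hand
 * for callers that need the other end of the tree (see cfq_rb_erase()
 * and __cfq_group_service_tree_add()).
 */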

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	u64 rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	u64 dispatch_start;
	u64 allocated_slice;
	u64 slice_dispatch;
	/* time when first request from queue completed and slice started. */
	u64 slice_start;
	u64 slice_end;
	s64 slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, so it has negative index
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* number of ios merged */
	struct blkg_rwstat		merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat		service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat		wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat		queued;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat		time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat		unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat		avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat		avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat		dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat		group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat		idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat		empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	u64				start_group_wait_time;
	u64				start_idle_time;
	u64				start_empty_time;
	uint16_t			flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* Per-cgroup data */
struct cfq_group_data {
	/* must be the first member */
	struct blkcg_policy_data cpd;

	unsigned int weight;
	unsigned int leaf_weight;
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg. This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active. An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to. This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1. The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs. leaf_weight is the weight of
	 * this cfqg against the child cfqgs. For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	u64 saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */

	/* async queue for each priority case */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
	int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t		blkcg_serial_nr; /* the current blkcg serial */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	u64 workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position. These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct hrtimer idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_latency;
	u64 cfq_fifo_expire[2];
	u64 cfq_slice[2];
	u64 cfq_slice_idle;
	u64 cfq_group_idle;
	u64 cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	u64 last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
static void cfq_put_queue(struct cfq_queue *cfqq);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}
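
/*
 * For illustration: each group keeps 2 * 3 class/type trees plus one
 * idle tree, so st_for(cfqg, RT_WORKLOAD, SYNC_WORKLOAD) resolves to
 * &cfqg->service_trees[1][2], while any IDLE_WORKLOAD lookup collapses
 * to &cfqg->service_tree_idle regardless of the type passed in.
 */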
417
3b18152c 418enum cfqq_state_flags {
b0b8d749
JA
419 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
420 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
b029195d 421 CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
b0b8d749 422 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
b0b8d749
JA
423 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
424 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
425 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
44f7c160 426 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
91fac317 427 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
b3b6d040 428 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
ae54abed 429 CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
76280aff 430 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
f75edf2d 431 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
3b18152c
JA
432};
433
434#define CFQ_CFQQ_FNS(name) \
435static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
436{ \
fe094d98 437 (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
3b18152c
JA
438} \
439static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
440{ \
fe094d98 441 (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
3b18152c
JA
442} \
443static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
444{ \
fe094d98 445 return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
3b18152c
JA
446}
447
448CFQ_CFQQ_FNS(on_rr);
449CFQ_CFQQ_FNS(wait_request);
b029195d 450CFQ_CFQQ_FNS(must_dispatch);
3b18152c 451CFQ_CFQQ_FNS(must_alloc_slice);
3b18152c
JA
452CFQ_CFQQ_FNS(fifo_expire);
453CFQ_CFQQ_FNS(idle_window);
454CFQ_CFQQ_FNS(prio_changed);
44f7c160 455CFQ_CFQQ_FNS(slice_new);
91fac317 456CFQ_CFQQ_FNS(sync);
a36e71f9 457CFQ_CFQQ_FNS(coop);
ae54abed 458CFQ_CFQQ_FNS(split_coop);
76280aff 459CFQ_CFQQ_FNS(deep);
f75edf2d 460CFQ_CFQQ_FNS(wait_busy);
3b18152c
JA
461#undef CFQ_CFQQ_FNS
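/*
 * For reference, each CFQ_CFQQ_FNS(name) line above generates three
 * inline helpers; e.g. CFQ_CFQQ_FNS(on_rr) yields
 * cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(),
 * which set, clear and test the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */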
462
629ed0b1 463#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
2ce4d50f 464
155fead9
TH
465/* cfqg stats flags */
466enum cfqg_stats_flags {
467 CFQG_stats_waiting = 0,
468 CFQG_stats_idling,
469 CFQG_stats_empty,
629ed0b1
TH
470};
471
155fead9
TH
472#define CFQG_FLAG_FNS(name) \
473static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
629ed0b1 474{ \
155fead9 475 stats->flags |= (1 << CFQG_stats_##name); \
629ed0b1 476} \
155fead9 477static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
629ed0b1 478{ \
155fead9 479 stats->flags &= ~(1 << CFQG_stats_##name); \
629ed0b1 480} \
155fead9 481static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
629ed0b1 482{ \
155fead9 483 return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
629ed0b1
TH
484} \
485
155fead9
TH
486CFQG_FLAG_FNS(waiting)
487CFQG_FLAG_FNS(idling)
488CFQG_FLAG_FNS(empty)
489#undef CFQG_FLAG_FNS
629ed0b1
TH
490
491/* This should be called with the queue_lock held. */
155fead9 492static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
629ed0b1 493{
84c7afce 494 u64 now;
629ed0b1 495
155fead9 496 if (!cfqg_stats_waiting(stats))
629ed0b1
TH
497 return;
498
84c7afce
OS
499 now = ktime_get_ns();
500 if (now > stats->start_group_wait_time)
629ed0b1
TH
501 blkg_stat_add(&stats->group_wait_time,
502 now - stats->start_group_wait_time);
155fead9 503 cfqg_stats_clear_waiting(stats);
629ed0b1
TH
504}
505
506/* This should be called with the queue_lock held. */
155fead9
TH
507static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
508 struct cfq_group *curr_cfqg)
629ed0b1 509{
155fead9 510 struct cfqg_stats *stats = &cfqg->stats;
629ed0b1 511
155fead9 512 if (cfqg_stats_waiting(stats))
629ed0b1 513 return;
155fead9 514 if (cfqg == curr_cfqg)
629ed0b1 515 return;
84c7afce 516 stats->start_group_wait_time = ktime_get_ns();
155fead9 517 cfqg_stats_mark_waiting(stats);
629ed0b1
TH
518}
519
520/* This should be called with the queue_lock held. */
155fead9 521static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
629ed0b1 522{
84c7afce 523 u64 now;
629ed0b1 524
155fead9 525 if (!cfqg_stats_empty(stats))
629ed0b1
TH
526 return;
527
84c7afce
OS
528 now = ktime_get_ns();
529 if (now > stats->start_empty_time)
629ed0b1
TH
530 blkg_stat_add(&stats->empty_time,
531 now - stats->start_empty_time);
155fead9 532 cfqg_stats_clear_empty(stats);
629ed0b1
TH
533}
534
155fead9 535static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
629ed0b1 536{
155fead9 537 blkg_stat_add(&cfqg->stats.dequeue, 1);
629ed0b1
TH
538}
539
155fead9 540static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
629ed0b1 541{
155fead9 542 struct cfqg_stats *stats = &cfqg->stats;
629ed0b1 543
4d5e80a7 544 if (blkg_rwstat_total(&stats->queued))
629ed0b1
TH
545 return;
546
547 /*
548 * group is already marked empty. This can happen if cfqq got new
549 * request in parent group and moved to this group while being added
550 * to service tree. Just ignore the event and move on.
551 */
155fead9 552 if (cfqg_stats_empty(stats))
629ed0b1
TH
553 return;
554
84c7afce 555 stats->start_empty_time = ktime_get_ns();
155fead9 556 cfqg_stats_mark_empty(stats);
629ed0b1
TH
557}
558
155fead9 559static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
629ed0b1 560{
155fead9 561 struct cfqg_stats *stats = &cfqg->stats;
629ed0b1 562
155fead9 563 if (cfqg_stats_idling(stats)) {
84c7afce 564 u64 now = ktime_get_ns();
629ed0b1 565
84c7afce 566 if (now > stats->start_idle_time)
629ed0b1
TH
567 blkg_stat_add(&stats->idle_time,
568 now - stats->start_idle_time);
155fead9 569 cfqg_stats_clear_idling(stats);
629ed0b1
TH
570 }
571}
572
155fead9 573static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
629ed0b1 574{
155fead9 575 struct cfqg_stats *stats = &cfqg->stats;
629ed0b1 576
155fead9 577 BUG_ON(cfqg_stats_idling(stats));
629ed0b1 578
84c7afce 579 stats->start_idle_time = ktime_get_ns();
155fead9 580 cfqg_stats_mark_idling(stats);
629ed0b1
TH
581}
582
155fead9 583static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
629ed0b1 584{
155fead9 585 struct cfqg_stats *stats = &cfqg->stats;
629ed0b1
TH
586
587 blkg_stat_add(&stats->avg_queue_size_sum,
4d5e80a7 588 blkg_rwstat_total(&stats->queued));
629ed0b1 589 blkg_stat_add(&stats->avg_queue_size_samples, 1);
155fead9 590 cfqg_stats_update_group_wait_time(stats);
629ed0b1
TH
591}
592
593#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
594
f48ec1d7
TH
595static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
596static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
597static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
598static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
599static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
600static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
601static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
629ed0b1
TH
602
603#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
604
605#ifdef CONFIG_CFQ_GROUP_IOSCHED
2ce4d50f 606
4ceab71b
JA
607static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
608{
609 return pd ? container_of(pd, struct cfq_group, pd) : NULL;
610}
611
612static struct cfq_group_data
613*cpd_to_cfqgd(struct blkcg_policy_data *cpd)
614{
81437648 615 return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
4ceab71b
JA
616}
617
618static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
619{
620 return pd_to_blkg(&cfqg->pd);
621}
622
ffea73fc
TH
623static struct blkcg_policy blkcg_policy_cfq;
624
625static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
626{
627 return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
628}
629
e48453c3
AA
630static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
631{
632 return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
633}
634
d02f7aa8 635static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
7918ffb5 636{
d02f7aa8 637 struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
7918ffb5 638
d02f7aa8 639 return pblkg ? blkg_to_cfqg(pblkg) : NULL;
7918ffb5
TH
640}
641
3984aa55
JK
642static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
643 struct cfq_group *ancestor)
644{
645 return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
646 cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
647}
648
eb7d8c07
TH
649static inline void cfqg_get(struct cfq_group *cfqg)
650{
651 return blkg_get(cfqg_to_blkg(cfqg));
652}
653
654static inline void cfqg_put(struct cfq_group *cfqg)
655{
656 return blkg_put(cfqg_to_blkg(cfqg));
657}
658
54e7ed12 659#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
35fe6d76
SL
660 blk_add_cgroup_trace_msg((cfqd)->queue, \
661 cfqg_to_blkg((cfqq)->cfqg)->blkcg, \
662 "cfq%d%c%c " fmt, (cfqq)->pid, \
b226e5c4
VG
663 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
664 cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
35fe6d76 665 ##args); \
54e7ed12
TH
666} while (0)
667
668#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
35fe6d76
SL
669 blk_add_cgroup_trace_msg((cfqd)->queue, \
670 cfqg_to_blkg(cfqg)->blkcg, fmt, ##args); \
54e7ed12 671} while (0)
2868ef7b 672
155fead9 673static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
ef295ecf
CH
674 struct cfq_group *curr_cfqg,
675 unsigned int op)
2ce4d50f 676{
ef295ecf 677 blkg_rwstat_add(&cfqg->stats.queued, op, 1);
155fead9
TH
678 cfqg_stats_end_empty_time(&cfqg->stats);
679 cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
2ce4d50f
TH
680}
681
155fead9 682static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
9a7f38c4 683 uint64_t time, unsigned long unaccounted_time)
2ce4d50f 684{
155fead9 685 blkg_stat_add(&cfqg->stats.time, time);
629ed0b1 686#ifdef CONFIG_DEBUG_BLK_CGROUP
155fead9 687 blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
629ed0b1 688#endif
2ce4d50f
TH
689}
690
ef295ecf
CH
691static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
692 unsigned int op)
2ce4d50f 693{
ef295ecf 694 blkg_rwstat_add(&cfqg->stats.queued, op, -1);
2ce4d50f
TH
695}
696
ef295ecf
CH
697static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
698 unsigned int op)
2ce4d50f 699{
ef295ecf 700 blkg_rwstat_add(&cfqg->stats.merged, op, 1);
2ce4d50f
TH
701}
702
155fead9 703static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
84c7afce
OS
704 u64 start_time_ns,
705 u64 io_start_time_ns,
706 unsigned int op)
2ce4d50f 707{
155fead9 708 struct cfqg_stats *stats = &cfqg->stats;
84c7afce 709 u64 now = ktime_get_ns();
629ed0b1 710
84c7afce
OS
711 if (now > io_start_time_ns)
712 blkg_rwstat_add(&stats->service_time, op,
713 now - io_start_time_ns);
714 if (io_start_time_ns > start_time_ns)
ef295ecf 715 blkg_rwstat_add(&stats->wait_time, op,
84c7afce 716 io_start_time_ns - start_time_ns);
2ce4d50f
TH
717}
718
689665af
TH
719/* @stats = 0 */
720static void cfqg_stats_reset(struct cfqg_stats *stats)
155fead9 721{
155fead9 722 /* queued stats shouldn't be cleared */
155fead9
TH
723 blkg_rwstat_reset(&stats->merged);
724 blkg_rwstat_reset(&stats->service_time);
725 blkg_rwstat_reset(&stats->wait_time);
726 blkg_stat_reset(&stats->time);
727#ifdef CONFIG_DEBUG_BLK_CGROUP
728 blkg_stat_reset(&stats->unaccounted_time);
729 blkg_stat_reset(&stats->avg_queue_size_sum);
730 blkg_stat_reset(&stats->avg_queue_size_samples);
731 blkg_stat_reset(&stats->dequeue);
732 blkg_stat_reset(&stats->group_wait_time);
733 blkg_stat_reset(&stats->idle_time);
734 blkg_stat_reset(&stats->empty_time);
735#endif
736}
737
0b39920b 738/* @to += @from */
e6269c44 739static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
0b39920b
TH
740{
741 /* queued stats shouldn't be cleared */
e6269c44
TH
742 blkg_rwstat_add_aux(&to->merged, &from->merged);
743 blkg_rwstat_add_aux(&to->service_time, &from->service_time);
744 blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
745 blkg_stat_add_aux(&from->time, &from->time);
0b39920b 746#ifdef CONFIG_DEBUG_BLK_CGROUP
e6269c44
TH
747 blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
748 blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
749 blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
750 blkg_stat_add_aux(&to->dequeue, &from->dequeue);
751 blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
752 blkg_stat_add_aux(&to->idle_time, &from->idle_time);
753 blkg_stat_add_aux(&to->empty_time, &from->empty_time);
0b39920b
TH
754#endif
755}
756
757/*
e6269c44 758 * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
0b39920b
TH
759 * recursive stats can still account for the amount used by this cfqg after
760 * it's gone.
761 */
762static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
763{
764 struct cfq_group *parent = cfqg_parent(cfqg);
765
766 lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
767
768 if (unlikely(!parent))
769 return;
770
e6269c44 771 cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
0b39920b 772 cfqg_stats_reset(&cfqg->stats);
0b39920b
TH
773}
774
eb7d8c07
TH
775#else /* CONFIG_CFQ_GROUP_IOSCHED */
776
d02f7aa8 777static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
3984aa55
JK
778static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
779 struct cfq_group *ancestor)
780{
781 return true;
782}
eb7d8c07
TH
783static inline void cfqg_get(struct cfq_group *cfqg) { }
784static inline void cfqg_put(struct cfq_group *cfqg) { }
785
7b679138 786#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
b226e5c4
VG
787 blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
788 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
789 cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
790 ##args)
4495a7d4 791#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
eb7d8c07 792
155fead9 793static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
ef295ecf 794 struct cfq_group *curr_cfqg, unsigned int op) { }
155fead9 795static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
9a7f38c4 796 uint64_t time, unsigned long unaccounted_time) { }
ef295ecf
CH
797static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
798 unsigned int op) { }
799static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
800 unsigned int op) { }
155fead9 801static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
84c7afce
OS
802 u64 start_time_ns,
803 u64 io_start_time_ns,
804 unsigned int op) { }
2ce4d50f 805
eb7d8c07
TH
806#endif /* CONFIG_CFQ_GROUP_IOSCHED */
807
7b679138
JA
808#define cfq_log(cfqd, fmt, args...) \
809 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
810
615f0259
VG
811/* Traverses through cfq group service trees */
812#define for_each_cfqg_st(cfqg, i, j, st) \
813 for (i = 0; i <= IDLE_WORKLOAD; i++) \
814 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
815 : &cfqg->service_tree_idle; \
816 (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
817 (i == IDLE_WORKLOAD && j == 0); \
818 j++, st = i < IDLE_WORKLOAD ? \
819 &cfqg->service_trees[i][j]: NULL) \
820
f5f2b6ce
SL
821static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
822 struct cfq_ttime *ttime, bool group_idle)
823{
9a7f38c4 824 u64 slice;
f5f2b6ce
SL
825 if (!sample_valid(ttime->ttime_samples))
826 return false;
827 if (group_idle)
828 slice = cfqd->cfq_group_idle;
829 else
830 slice = cfqd->cfq_slice_idle;
831 return ttime->ttime_mean > slice;
832}
615f0259 833
02b35081
VG
834static inline bool iops_mode(struct cfq_data *cfqd)
835{
836 /*
837 * If we are not idling on queues and it is a NCQ drive, parallel
838 * execution of requests is on and measuring time is not possible
839 * in most of the cases until and unless we drive shallower queue
840 * depths and that becomes a performance bottleneck. In such cases
841 * switch to start providing fairness in terms of number of IOs.
842 */
843 if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
844 return true;
845 else
846 return false;
847}
848
3bf10fea 849static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
c0324a02
CZ
850{
851 if (cfq_class_idle(cfqq))
852 return IDLE_WORKLOAD;
853 if (cfq_class_rt(cfqq))
854 return RT_WORKLOAD;
855 return BE_WORKLOAD;
856}
857
718eee05
CZ
858
859static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
860{
861 if (!cfq_cfqq_sync(cfqq))
862 return ASYNC_WORKLOAD;
863 if (!cfq_cfqq_idle_window(cfqq))
864 return SYNC_NOIDLE_WORKLOAD;
865 return SYNC_WORKLOAD;
866}
867
3bf10fea 868static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
58ff82f3
VG
869 struct cfq_data *cfqd,
870 struct cfq_group *cfqg)
c0324a02 871{
3bf10fea 872 if (wl_class == IDLE_WORKLOAD)
cdb16e8f 873 return cfqg->service_tree_idle.count;
c0324a02 874
34b98d03
VG
875 return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
876 cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
877 cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
c0324a02
CZ
878}
879
f26bd1f0
VG
880static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
881 struct cfq_group *cfqg)
882{
34b98d03
VG
883 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
884 cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
f26bd1f0
VG
885}
886
165125e1 887static void cfq_dispatch_insert(struct request_queue *, struct request *);
4f85cb96 888static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
2da8de0b 889 struct cfq_io_cq *cic, struct bio *bio);
91fac317 890
c5869807
TH
891static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
892{
893 /* cic->icq is the first member, %NULL will convert to %NULL */
894 return container_of(icq, struct cfq_io_cq, icq);
895}
896
47fdd4ca
TH
897static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
898 struct io_context *ioc)
899{
900 if (ioc)
901 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
902 return NULL;
903}
904
c5869807 905static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
91fac317 906{
a6151c3a 907 return cic->cfqq[is_sync];
91fac317
VT
908}
909
c5869807
TH
910static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
911 bool is_sync)
91fac317 912{
a6151c3a 913 cic->cfqq[is_sync] = cfqq;
91fac317
VT
914}
915
c5869807 916static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
bca4b914 917{
c5869807 918 return cic->icq.q->elevator->elevator_data;
bca4b914
KK
919}
920
99f95e52
AM
921/*
922 * scheduler run of queue, if there are requests pending and no one in the
923 * driver that will restart queueing
924 */
23e018a1 925static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
99f95e52 926{
7b679138
JA
927 if (cfqd->busy_queues) {
928 cfq_log(cfqd, "schedule dispatch");
59c3d45e 929 kblockd_schedule_work(&cfqd->unplug_work);
7b679138 930 }
99f95e52
AM
931}
932
44f7c160
JA
933/*
934 * Scale schedule slice based on io priority. Use the sync time slice only
935 * if a queue is marked sync and has sync io queued. A sync queue with async
936 * io only, should not get full sync slice length.
937 */
9a7f38c4 938static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync,
d9e7620e 939 unsigned short prio)
44f7c160 940{
9a7f38c4
JM
941 u64 base_slice = cfqd->cfq_slice[sync];
942 u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE);
44f7c160 943
d9e7620e
JA
944 WARN_ON(prio >= IOPRIO_BE_NR);
945
9a7f38c4 946 return base_slice + (slice * (4 - prio));
d9e7620e 947}
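/*
 * Worked example (assuming the default cfq_slice_sync of 100ms): the
 * per-prio step is 100ms / CFQ_SLICE_SCALE = 20ms, so an ioprio 0 sync
 * queue gets 100 + 20 * 4 = 180ms, the default prio 4 gets 100ms, and
 * prio 7 gets 100 - 60 = 40ms.
 */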
44f7c160 948
9a7f38c4 949static inline u64
d9e7620e
JA
950cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
951{
952 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
44f7c160
JA
953}
954
1d3650f7
TH
955/**
956 * cfqg_scale_charge - scale disk time charge according to cfqg weight
957 * @charge: disk time being charged
958 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
959 *
960 * Scale @charge according to @vfraction, which is in range (0, 1]. The
961 * scaling is inversely proportional.
962 *
963 * scaled = charge / vfraction
964 *
965 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
966 */
9a7f38c4 967static inline u64 cfqg_scale_charge(u64 charge,
1d3650f7 968 unsigned int vfraction)
25bc6b07 969{
1d3650f7 970 u64 c = charge << CFQ_SERVICE_SHIFT; /* make it fixed point */
25bc6b07 971
1d3650f7
TH
972 /* charge / vfraction */
973 c <<= CFQ_SERVICE_SHIFT;
9a7f38c4 974 return div_u64(c, vfraction);
25bc6b07
VG
975}
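/*
 * Worked example: vfraction is fixed point with CFQ_SERVICE_SHIFT (12)
 * fractional bits, so a group entitled to the whole device has
 * vfraction 4096 and one entitled to a quarter has 1024.  Charging 8ms
 * of service advances the first group's vdisktime by 8ms << 12 but the
 * second's by four times as much, which is how a smaller share turns
 * into being picked less often from the service tree.
 */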
976
977static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
978{
979 s64 delta = (s64)(vdisktime - min_vdisktime);
980 if (delta > 0)
981 min_vdisktime = vdisktime;
982
983 return min_vdisktime;
984}
985
25bc6b07
VG
986static void update_min_vdisktime(struct cfq_rb_root *st)
987{
09663c86
DB
988 if (!RB_EMPTY_ROOT(&st->rb.rb_root)) {
989 struct cfq_group *cfqg = rb_entry_cfqg(st->rb.rb_leftmost);
25bc6b07 990
a6032710
GJ
991 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
992 cfqg->vdisktime);
25bc6b07 993 }
25bc6b07
VG
994}
995
5db5d642
CZ
996/*
997 * get averaged number of queues of RT/BE priority.
998 * average is updated, with a formula that gives more weight to higher numbers,
999 * to quickly follows sudden increases and decrease slowly
1000 */
1001
58ff82f3
VG
1002static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
1003 struct cfq_group *cfqg, bool rt)
5869619c 1004{
5db5d642
CZ
1005 unsigned min_q, max_q;
1006 unsigned mult = cfq_hist_divisor - 1;
1007 unsigned round = cfq_hist_divisor / 2;
58ff82f3 1008 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
5db5d642 1009
58ff82f3
VG
1010 min_q = min(cfqg->busy_queues_avg[rt], busy);
1011 max_q = max(cfqg->busy_queues_avg[rt], busy);
1012 cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
5db5d642 1013 cfq_hist_divisor;
58ff82f3
VG
1014 return cfqg->busy_queues_avg[rt];
1015}
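/*
 * Numerically, with cfq_hist_divisor == 4 the update above is
 * (3 * max(avg, busy) + min(avg, busy) + 2) / 4: an increase pulls the
 * estimate 3/4 of the way to the new count in one step, while a
 * decrease only closes about 1/4 of the gap per update.
 */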
1016
9a7f38c4 1017static inline u64
58ff82f3
VG
1018cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
1019{
41cad6ab 1020 return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
5db5d642
CZ
1021}
1022
9a7f38c4 1023static inline u64
ba5bd520 1024cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
44f7c160 1025{
9a7f38c4 1026 u64 slice = cfq_prio_to_slice(cfqd, cfqq);
5db5d642 1027 if (cfqd->cfq_latency) {
58ff82f3
VG
1028 /*
1029 * interested queues (we consider only the ones with the same
1030 * priority class in the cfq group)
1031 */
1032 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
1033 cfq_class_rt(cfqq));
9a7f38c4
JM
1034 u64 sync_slice = cfqd->cfq_slice[1];
1035 u64 expect_latency = sync_slice * iq;
1036 u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
58ff82f3
VG
1037
1038 if (expect_latency > group_slice) {
9a7f38c4
JM
1039 u64 base_low_slice = 2 * cfqd->cfq_slice_idle;
1040 u64 low_slice;
1041
5db5d642
CZ
1042 /* scale low_slice according to IO priority
1043 * and sync vs async */
9a7f38c4
JM
1044 low_slice = div64_u64(base_low_slice*slice, sync_slice);
1045 low_slice = min(slice, low_slice);
5db5d642
CZ
1046 /* the adapted slice value is scaled to fit all iqs
1047 * into the target latency */
9a7f38c4
JM
1048 slice = div64_u64(slice*group_slice, expect_latency);
1049 slice = max(slice, low_slice);
5db5d642
CZ
1050 }
1051 }
c553f8e3
SL
1052 return slice;
1053}
1054
1055static inline void
1056cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1057{
9a7f38c4
JM
1058 u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
1059 u64 now = ktime_get_ns();
c553f8e3 1060
9a7f38c4
JM
1061 cfqq->slice_start = now;
1062 cfqq->slice_end = now + slice;
f75edf2d 1063 cfqq->allocated_slice = slice;
9a7f38c4 1064 cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now);
44f7c160
JA
1065}
1066
1067/*
1068 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
1069 * isn't valid until the first request from the dispatch is activated
1070 * and the slice time set.
1071 */
a6151c3a 1072static inline bool cfq_slice_used(struct cfq_queue *cfqq)
44f7c160
JA
1073{
1074 if (cfq_cfqq_slice_new(cfqq))
c1e44756 1075 return false;
9a7f38c4 1076 if (ktime_get_ns() < cfqq->slice_end)
c1e44756 1077 return false;
44f7c160 1078
c1e44756 1079 return true;
44f7c160
JA
1080}
1081
1da177e4 1082/*
5e705374 1083 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
1da177e4 1084 * We choose the request that is closest to the head right now. Distance
e8a99053 1085 * behind the head is penalized and only allowed to a certain extent.
1da177e4 1086 */
5e705374 1087static struct request *
cf7c25cf 1088cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
1da177e4 1089{
cf7c25cf 1090 sector_t s1, s2, d1 = 0, d2 = 0;
1da177e4 1091 unsigned long back_max;
e8a99053
AM
1092#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
1093#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
1094 unsigned wrap = 0; /* bit mask: requests behind the disk head? */
1da177e4 1095
5e705374
JA
1096 if (rq1 == NULL || rq1 == rq2)
1097 return rq2;
1098 if (rq2 == NULL)
1099 return rq1;
9c2c38a1 1100
229836bd
NK
1101 if (rq_is_sync(rq1) != rq_is_sync(rq2))
1102 return rq_is_sync(rq1) ? rq1 : rq2;
1103
65299a3b
CH
1104 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
1105 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
b53d1ed7 1106
83096ebf
TH
1107 s1 = blk_rq_pos(rq1);
1108 s2 = blk_rq_pos(rq2);
1da177e4 1109
1da177e4
LT
1110 /*
1111 * by definition, 1KiB is 2 sectors
1112 */
1113 back_max = cfqd->cfq_back_max * 2;
1114
1115 /*
1116 * Strict one way elevator _except_ in the case where we allow
1117 * short backward seeks which are biased as twice the cost of a
1118 * similar forward seek.
1119 */
1120 if (s1 >= last)
1121 d1 = s1 - last;
1122 else if (s1 + back_max >= last)
1123 d1 = (last - s1) * cfqd->cfq_back_penalty;
1124 else
e8a99053 1125 wrap |= CFQ_RQ1_WRAP;
1da177e4
LT
1126
1127 if (s2 >= last)
1128 d2 = s2 - last;
1129 else if (s2 + back_max >= last)
1130 d2 = (last - s2) * cfqd->cfq_back_penalty;
1131 else
e8a99053 1132 wrap |= CFQ_RQ2_WRAP;
1da177e4
LT
1133
1134 /* Found required data */
e8a99053
AM
1135
1136 /*
1137 * By doing switch() on the bit mask "wrap" we avoid having to
1138 * check two variables for all permutations: --> faster!
1139 */
1140 switch (wrap) {
5e705374 1141 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
e8a99053 1142 if (d1 < d2)
5e705374 1143 return rq1;
e8a99053 1144 else if (d2 < d1)
5e705374 1145 return rq2;
e8a99053
AM
1146 else {
1147 if (s1 >= s2)
5e705374 1148 return rq1;
e8a99053 1149 else
5e705374 1150 return rq2;
e8a99053 1151 }
1da177e4 1152
e8a99053 1153 case CFQ_RQ2_WRAP:
5e705374 1154 return rq1;
e8a99053 1155 case CFQ_RQ1_WRAP:
5e705374
JA
1156 return rq2;
1157 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
e8a99053
AM
1158 default:
1159 /*
1160 * Since both rqs are wrapped,
1161 * start with the one that's further behind head
1162 * (--> only *one* back seek required),
1163 * since back seek takes more time than forward.
1164 */
1165 if (s1 <= s2)
5e705374 1166 return rq1;
1da177e4 1167 else
5e705374 1168 return rq2;
1da177e4
LT
1169 }
1170}
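/*
 * Example of the distance logic above: with the head at sector 1000, a
 * request at 1200 scores d = 200 and one at 900 scores
 * d = 100 * cfq_back_penalty = 200 with the default penalty of 2, so
 * the tie goes to the higher sector and the forward request wins.  A
 * request more than cfq_back_max KiB behind the head counts as wrapped
 * and only wins against another wrapped request.
 */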
1171
0871714e 1172static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
cc09e299 1173{
615f0259
VG
1174 /* Service tree is empty */
1175 if (!root->count)
1176 return NULL;
1177
09663c86 1178 return rb_entry(rb_first_cached(&root->rb), struct cfq_queue, rb_node);
cc09e299
JA
1179}
1180
1fa8f6d6
VG
1181static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
1182{
09663c86 1183 return rb_entry_cfqg(rb_first_cached(&root->rb));
1fa8f6d6
VG
1184}
1185
09663c86 1186static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
a36e71f9 1187{
f0f1a45f
DB
1188 if (root->rb_rightmost == n)
1189 root->rb_rightmost = rb_prev(n);
1190
09663c86 1191 rb_erase_cached(n, &root->rb);
a36e71f9 1192 RB_CLEAR_NODE(n);
a36e71f9 1193
aa6f6a3d 1194 --root->count;
cc09e299
JA
1195}
1196
1da177e4
LT
1197/*
1198 * would be nice to take fifo expire time into account as well
1199 */
5e705374
JA
1200static struct request *
1201cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1202 struct request *last)
1da177e4 1203{
21183b07
JA
1204 struct rb_node *rbnext = rb_next(&last->rb_node);
1205 struct rb_node *rbprev = rb_prev(&last->rb_node);
5e705374 1206 struct request *next = NULL, *prev = NULL;
1da177e4 1207
21183b07 1208 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
1da177e4
LT
1209
1210 if (rbprev)
5e705374 1211 prev = rb_entry_rq(rbprev);
1da177e4 1212
21183b07 1213 if (rbnext)
5e705374 1214 next = rb_entry_rq(rbnext);
21183b07
JA
1215 else {
1216 rbnext = rb_first(&cfqq->sort_list);
1217 if (rbnext && rbnext != &last->rb_node)
5e705374 1218 next = rb_entry_rq(rbnext);
21183b07 1219 }
1da177e4 1220
cf7c25cf 1221 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
1da177e4
LT
1222}
1223
9a7f38c4
JM
1224static u64 cfq_slice_offset(struct cfq_data *cfqd,
1225 struct cfq_queue *cfqq)
1da177e4 1226{
d9e7620e
JA
1227 /*
1228 * just an approximation, should be ok.
1229 */
cdb16e8f 1230 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
464191c6 1231 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
d9e7620e
JA
1232}
1233
1fa8f6d6
VG
1234static inline s64
1235cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
1236{
1237 return cfqg->vdisktime - st->min_vdisktime;
1238}
1239
1240static void
1241__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1242{
09663c86 1243 struct rb_node **node = &st->rb.rb_root.rb_node;
1fa8f6d6
VG
1244 struct rb_node *parent = NULL;
1245 struct cfq_group *__cfqg;
1246 s64 key = cfqg_key(st, cfqg);
f0f1a45f 1247 bool leftmost = true, rightmost = true;
1fa8f6d6
VG
1248
1249 while (*node != NULL) {
1250 parent = *node;
1251 __cfqg = rb_entry_cfqg(parent);
1252
f0f1a45f 1253 if (key < cfqg_key(st, __cfqg)) {
1fa8f6d6 1254 node = &parent->rb_left;
f0f1a45f
DB
1255 rightmost = false;
1256 } else {
1fa8f6d6 1257 node = &parent->rb_right;
09663c86 1258 leftmost = false;
1fa8f6d6
VG
1259 }
1260 }
1261
f0f1a45f
DB
1262 if (rightmost)
1263 st->rb_rightmost = &cfqg->rb_node;
1264
1fa8f6d6 1265 rb_link_node(&cfqg->rb_node, parent, node);
09663c86 1266 rb_insert_color_cached(&cfqg->rb_node, &st->rb, leftmost);
1fa8f6d6
VG
1267}
1268
7b5af5cf
TM
1269/*
1270 * This has to be called only on activation of cfqg
1271 */
1fa8f6d6 1272static void
8184f93e
JT
1273cfq_update_group_weight(struct cfq_group *cfqg)
1274{
3381cb8d 1275 if (cfqg->new_weight) {
8184f93e 1276 cfqg->weight = cfqg->new_weight;
3381cb8d 1277 cfqg->new_weight = 0;
8184f93e 1278 }
e15693ef
TM
1279}
1280
1281static void
1282cfq_update_group_leaf_weight(struct cfq_group *cfqg)
1283{
1284 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
e71357e1
TH
1285
1286 if (cfqg->new_leaf_weight) {
1287 cfqg->leaf_weight = cfqg->new_leaf_weight;
1288 cfqg->new_leaf_weight = 0;
1289 }
8184f93e
JT
1290}
1291
1292static void
1293cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
1294{
1d3650f7 1295 unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
7918ffb5 1296 struct cfq_group *pos = cfqg;
1d3650f7 1297 struct cfq_group *parent;
7918ffb5
TH
1298 bool propagate;
1299
1300 /* add to the service tree */
8184f93e
JT
1301 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
1302
7b5af5cf
TM
1303 /*
1304 * Update leaf_weight. We cannot update weight at this point
1305 * because cfqg might already have been activated and is
1306 * contributing its current weight to the parent's child_weight.
1307 */
e15693ef 1308 cfq_update_group_leaf_weight(cfqg);
8184f93e 1309 __cfq_group_service_tree_add(st, cfqg);
7918ffb5
TH
1310
1311 /*
1d3650f7
TH
1312 * Activate @cfqg and calculate the portion of vfraction @cfqg is
1313 * entitled to. vfraction is calculated by walking the tree
1314 * towards the root calculating the fraction it has at each level.
1315 * The compounded ratio is how much vfraction @cfqg owns.
1316 *
1317 * Start with the proportion tasks in this cfqg has against active
1318 * children cfqgs - its leaf_weight against children_weight.
7918ffb5
TH
1319 */
1320 propagate = !pos->nr_active++;
1321 pos->children_weight += pos->leaf_weight;
1d3650f7 1322 vfr = vfr * pos->leaf_weight / pos->children_weight;
7918ffb5 1323
1d3650f7
TH
1324 /*
1325 * Compound ->weight walking up the tree. Both activation and
1326 * vfraction calculation are done in the same loop. Propagation
1327 * stops once an already activated node is met. vfraction
1328 * calculation should always continue to the root.
1329 */
d02f7aa8 1330 while ((parent = cfqg_parent(pos))) {
1d3650f7 1331 if (propagate) {
e15693ef 1332 cfq_update_group_weight(pos);
1d3650f7
TH
1333 propagate = !parent->nr_active++;
1334 parent->children_weight += pos->weight;
1335 }
1336 vfr = vfr * pos->weight / parent->children_weight;
7918ffb5
TH
1337 pos = parent;
1338 }
1d3650f7
TH
1339
1340 cfqg->vfraction = max_t(unsigned, vfr, 1);
8184f93e
JT
1341}
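/*
 * Worked example of the compounding above: if this cfqg's leaf_weight
 * is 500 against a children_weight of 1000, vfr becomes
 * 4096 * 500 / 1000 = 2048; if the group's own weight is then 500
 * against its parent's children_weight of 2000, vfr ends up at
 * 2048 * 500 / 2000 = 512, i.e. the group owns 512/4096 = 1/8th of the
 * disk time.
 */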
1342
5be6b756
HT
1343static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
1344{
1345 if (!iops_mode(cfqd))
1346 return CFQ_SLICE_MODE_GROUP_DELAY;
1347 else
1348 return CFQ_IOPS_MODE_GROUP_DELAY;
1349}
1350
8184f93e
JT
1351static void
1352cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
1fa8f6d6
VG
1353{
1354 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1355 struct cfq_group *__cfqg;
1356 struct rb_node *n;
1357
1358 cfqg->nr_cfqq++;
760701bf 1359 if (!RB_EMPTY_NODE(&cfqg->rb_node))
1fa8f6d6
VG
1360 return;
1361
1362 /*
1363 * Currently put the group at the end. Later implement something
1364 * so that groups get lesser vtime based on their weights, so that
25985edc 1365 * if group does not loose all if it was not continuously backlogged.
1fa8f6d6 1366 */
f0f1a45f 1367 n = st->rb_rightmost;
1fa8f6d6
VG
1368 if (n) {
1369 __cfqg = rb_entry_cfqg(n);
5be6b756
HT
1370 cfqg->vdisktime = __cfqg->vdisktime +
1371 cfq_get_cfqg_vdisktime_delay(cfqd);
1fa8f6d6
VG
1372 } else
1373 cfqg->vdisktime = st->min_vdisktime;
8184f93e
JT
1374 cfq_group_service_tree_add(st, cfqg);
1375}
1fa8f6d6 1376
8184f93e
JT
1377static void
1378cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
1379{
7918ffb5
TH
1380 struct cfq_group *pos = cfqg;
1381 bool propagate;
1382
1383 /*
1384 * Undo activation from cfq_group_service_tree_add(). Deactivate
1385 * @cfqg and propagate deactivation upwards.
1386 */
1387 propagate = !--pos->nr_active;
1388 pos->children_weight -= pos->leaf_weight;
1389
1390 while (propagate) {
d02f7aa8 1391 struct cfq_group *parent = cfqg_parent(pos);
7918ffb5
TH
1392
1393 /* @pos has 0 nr_active at this point */
1394 WARN_ON_ONCE(pos->children_weight);
1d3650f7 1395 pos->vfraction = 0;
7918ffb5
TH
1396
1397 if (!parent)
1398 break;
1399
1400 propagate = !--parent->nr_active;
1401 parent->children_weight -= pos->weight;
1402 pos = parent;
1403 }
1404
1405 /* remove from the service tree */
8184f93e
JT
1406 if (!RB_EMPTY_NODE(&cfqg->rb_node))
1407 cfq_rb_erase(&cfqg->rb_node, st);
1fa8f6d6
VG
1408}
1409
1410static void
8184f93e 1411cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
1fa8f6d6
VG
1412{
1413 struct cfq_rb_root *st = &cfqd->grp_service_tree;
1414
1415 BUG_ON(cfqg->nr_cfqq < 1);
1416 cfqg->nr_cfqq--;
25bc6b07 1417
1fa8f6d6
VG
1418 /* If there are other cfq queues under this group, don't delete it */
1419 if (cfqg->nr_cfqq)
1420 return;
1421
2868ef7b 1422 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
8184f93e 1423 cfq_group_service_tree_del(st, cfqg);
4d2ceea4 1424 cfqg->saved_wl_slice = 0;
155fead9 1425 cfqg_stats_update_dequeue(cfqg);
dae739eb
VG
1426}
1427
9a7f38c4
JM
1428static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
1429 u64 *unaccounted_time)
dae739eb 1430{
9a7f38c4
JM
1431 u64 slice_used;
1432 u64 now = ktime_get_ns();
dae739eb
VG
1433
1434 /*
1435 * Queue got expired before even a single request completed or
1436 * got expired immediately after first request completion.
1437 */
9a7f38c4 1438 if (!cfqq->slice_start || cfqq->slice_start == now) {
dae739eb
VG
1439 /*
1440 * Also charge the seek time incurred to the group, otherwise
1441 * if there are mutiple queues in the group, each can dispatch
1442 * a single request on seeky media and cause lots of seek time
1443 * and group will never know it.
1444 */
0b31c10c
JK
1445 slice_used = max_t(u64, (now - cfqq->dispatch_start),
1446 jiffies_to_nsecs(1));
dae739eb 1447 } else {
9a7f38c4 1448 slice_used = now - cfqq->slice_start;
167400d3
JT
1449 if (slice_used > cfqq->allocated_slice) {
1450 *unaccounted_time = slice_used - cfqq->allocated_slice;
f75edf2d 1451 slice_used = cfqq->allocated_slice;
167400d3 1452 }
9a7f38c4 1453 if (cfqq->slice_start > cfqq->dispatch_start)
167400d3
JT
1454 *unaccounted_time += cfqq->slice_start -
1455 cfqq->dispatch_start;
dae739eb
VG
1456 }
1457
dae739eb
VG
1458 return slice_used;
1459}
1460
1461static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
e5ff082e 1462 struct cfq_queue *cfqq)
dae739eb
VG
1463{
1464 struct cfq_rb_root *st = &cfqd->grp_service_tree;
9a7f38c4 1465 u64 used_sl, charge, unaccounted_sl = 0;
f26bd1f0
VG
1466 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
1467 - cfqg->service_tree_idle.count;
1d3650f7 1468 unsigned int vfr;
9a7f38c4 1469 u64 now = ktime_get_ns();
f26bd1f0
VG
1470
1471 BUG_ON(nr_sync < 0);
167400d3 1472 used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
dae739eb 1473
02b35081
VG
1474 if (iops_mode(cfqd))
1475 charge = cfqq->slice_dispatch;
1476 else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
1477 charge = cfqq->allocated_slice;
dae739eb 1478
1d3650f7
TH
1479 /*
1480 * Can't update vdisktime while on service tree and cfqg->vfraction
1481 * is valid only while on it. Cache vfr, leave the service tree,
1482 * update vdisktime and go back on. The re-addition to the tree
1483 * will also update the weights as necessary.
1484 */
1485 vfr = cfqg->vfraction;
8184f93e 1486 cfq_group_service_tree_del(st, cfqg);
1d3650f7 1487 cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
8184f93e 1488 cfq_group_service_tree_add(st, cfqg);
dae739eb
VG
1489
1490 /* This group is being expired. Save the context */
9a7f38c4
JM
1491 if (cfqd->workload_expires > now) {
1492 cfqg->saved_wl_slice = cfqd->workload_expires - now;
4d2ceea4
VG
1493 cfqg->saved_wl_type = cfqd->serving_wl_type;
1494 cfqg->saved_wl_class = cfqd->serving_wl_class;
dae739eb 1495 } else
4d2ceea4 1496 cfqg->saved_wl_slice = 0;
2868ef7b
VG
1497
1498 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1499 st->min_vdisktime);
fd16d263 1500 cfq_log_cfqq(cfqq->cfqd, cfqq,
9a7f38c4 1501 "sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu",
fd16d263
JP
1502 used_sl, cfqq->slice_dispatch, charge,
1503 iops_mode(cfqd), cfqq->nr_sectors);
155fead9
TH
1504 cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
1505 cfqg_stats_set_start_empty_time(cfqg);
1fa8f6d6
VG
1506}
1507
f51b802c
TH
1508/**
1509 * cfq_init_cfqg_base - initialize base part of a cfq_group
1510 * @cfqg: cfq_group to initialize
1511 *
1512 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1513 * is enabled or not.
1514 */
1515static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1516{
1517 struct cfq_rb_root *st;
1518 int i, j;
1519
1520 for_each_cfqg_st(cfqg, i, j, st)
1521 *st = CFQ_RB_ROOT;
1522 RB_CLEAR_NODE(&cfqg->rb_node);
1523
9a7f38c4 1524 cfqg->ttime.last_end_request = ktime_get_ns();
f51b802c
TH
1525}
1526
25fb5169 1527#ifdef CONFIG_CFQ_GROUP_IOSCHED
69d7fde5
TH
1528static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
1529 bool on_dfl, bool reset_dev, bool is_leaf_weight);
1530
24bdb8ef 1531static void cfqg_stats_exit(struct cfqg_stats *stats)
90d3839b 1532{
24bdb8ef
TH
1533 blkg_rwstat_exit(&stats->merged);
1534 blkg_rwstat_exit(&stats->service_time);
1535 blkg_rwstat_exit(&stats->wait_time);
1536 blkg_rwstat_exit(&stats->queued);
24bdb8ef
TH
1537 blkg_stat_exit(&stats->time);
1538#ifdef CONFIG_DEBUG_BLK_CGROUP
1539 blkg_stat_exit(&stats->unaccounted_time);
1540 blkg_stat_exit(&stats->avg_queue_size_sum);
1541 blkg_stat_exit(&stats->avg_queue_size_samples);
1542 blkg_stat_exit(&stats->dequeue);
1543 blkg_stat_exit(&stats->group_wait_time);
1544 blkg_stat_exit(&stats->idle_time);
1545 blkg_stat_exit(&stats->empty_time);
1546#endif
1547}
1548
1549static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
1550{
77ea7338 1551 if (blkg_rwstat_init(&stats->merged, gfp) ||
24bdb8ef
TH
1552 blkg_rwstat_init(&stats->service_time, gfp) ||
1553 blkg_rwstat_init(&stats->wait_time, gfp) ||
1554 blkg_rwstat_init(&stats->queued, gfp) ||
24bdb8ef
TH
1555 blkg_stat_init(&stats->time, gfp))
1556 goto err;
90d3839b
PZ
1557
1558#ifdef CONFIG_DEBUG_BLK_CGROUP
24bdb8ef
TH
1559 if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
1560 blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
1561 blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
1562 blkg_stat_init(&stats->dequeue, gfp) ||
1563 blkg_stat_init(&stats->group_wait_time, gfp) ||
1564 blkg_stat_init(&stats->idle_time, gfp) ||
1565 blkg_stat_init(&stats->empty_time, gfp))
1566 goto err;
90d3839b 1567#endif
24bdb8ef
TH
1568 return 0;
1569err:
1570 cfqg_stats_exit(stats);
1571 return -ENOMEM;
90d3839b
PZ
1572}
1573
static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
{
	struct cfq_group_data *cgd;

	cgd = kzalloc(sizeof(*cgd), gfp);
	if (!cgd)
		return NULL;
	return &cgd->cpd;
}

static void cfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
	unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
			      CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;

	if (cpd_to_blkcg(cpd) == &blkcg_root)
		weight *= 2;

	cgd->weight = weight;
	cgd->leaf_weight = weight;
}

static void cfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_cfqgd(cpd));
}

static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
{
	struct blkcg *blkcg = cpd_to_blkcg(cpd);
	bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
	unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;

	if (blkcg == &blkcg_root)
		weight *= 2;

	WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
	WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
}
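
/*
 * Note on the defaults above: a freshly created cgroup starts at
 * CGROUP_WEIGHT_DFL on the unified hierarchy and CFQ_WEIGHT_LEGACY_DFL on
 * the legacy one, while blkcg_root gets twice that, so the root group is
 * favoured over newly created children until the administrator configures
 * something else. cfq_cpd_bind() re-applies the same defaults (clearing any
 * per-device overrides) when the controller is bound to a hierarchy.
 */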

static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
{
	struct cfq_group *cfqg;

	cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
	if (!cfqg)
		return NULL;

	cfq_init_cfqg_base(cfqg);
	if (cfqg_stats_init(&cfqg->stats, gfp)) {
		kfree(cfqg);
		return NULL;
	}

	return &cfqg->pd;
}

static void cfq_pd_init(struct blkg_policy_data *pd)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);
	struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);

	cfqg->weight = cgd->weight;
	cfqg->leaf_weight = cgd->leaf_weight;
}

static void cfq_pd_offline(struct blkg_policy_data *pd)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqg->async_cfqq[0][i])
			cfq_put_queue(cfqg->async_cfqq[0][i]);
		if (cfqg->async_cfqq[1][i])
			cfq_put_queue(cfqg->async_cfqq[1][i]);
	}

	if (cfqg->async_idle_cfqq)
		cfq_put_queue(cfqg->async_idle_cfqq);

	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	cfqg_stats_xfer_dead(cfqg);
}

static void cfq_pd_free(struct blkg_policy_data *pd)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);

	cfqg_stats_exit(&cfqg->stats);
	kfree(cfqg);
}

static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);

	cfqg_stats_reset(&cfqg->stats);
}

static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, cfqd->queue);
	if (likely(blkg))
		return blkg_to_cfqg(blkg);
	return NULL;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqg_get(cfqg);
}
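
/*
 * Sketch (assumed wiring, not part of this hunk): the cpd_* and pd_*
 * callbacks above are registered with the blkcg core through a
 * struct blkcg_policy instance; the authoritative definition,
 * blkcg_policy_cfq, appears further down in this file and looks roughly
 * like:
 *
 *	static struct blkcg_policy blkcg_policy_cfq = {
 *		.dfl_cftypes		= cfq_blkcg_files,
 *		.legacy_cftypes		= cfq_blkcg_legacy_files,
 *
 *		.cpd_alloc_fn		= cfq_cpd_alloc,
 *		.cpd_init_fn		= cfq_cpd_init,
 *		.cpd_free_fn		= cfq_cpd_free,
 *		.cpd_bind_fn		= cfq_cpd_bind,
 *
 *		.pd_alloc_fn		= cfq_pd_alloc,
 *		.pd_init_fn		= cfq_pd_init,
 *		.pd_offline_fn		= cfq_pd_offline,
 *		.pd_free_fn		= cfq_pd_free,
 *		.pd_reset_stats_fn	= cfq_pd_reset_stats,
 *	};
 */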
1697
f95a04af
TH
1698static u64 cfqg_prfill_weight_device(struct seq_file *sf,
1699 struct blkg_policy_data *pd, int off)
60c2bc2d 1700{
f95a04af 1701 struct cfq_group *cfqg = pd_to_cfqg(pd);
3381cb8d
TH
1702
1703 if (!cfqg->dev_weight)
60c2bc2d 1704 return 0;
f95a04af 1705 return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
60c2bc2d
TH
1706}
1707
2da8ca82 1708static int cfqg_print_weight_device(struct seq_file *sf, void *v)
60c2bc2d 1709{
2da8ca82
TH
1710 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1711 cfqg_prfill_weight_device, &blkcg_policy_cfq,
1712 0, false);
60c2bc2d
TH
1713 return 0;
1714}
1715
e71357e1
TH
1716static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
1717 struct blkg_policy_data *pd, int off)
1718{
1719 struct cfq_group *cfqg = pd_to_cfqg(pd);
1720
1721 if (!cfqg->dev_leaf_weight)
1722 return 0;
1723 return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
1724}
1725
2da8ca82 1726static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
e71357e1 1727{
2da8ca82
TH
1728 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1729 cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
1730 0, false);
e71357e1
TH
1731 return 0;
1732}
1733
2da8ca82 1734static int cfq_print_weight(struct seq_file *sf, void *v)
60c2bc2d 1735{
e48453c3 1736 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
9470e4a6
JA
1737 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1738 unsigned int val = 0;
e48453c3 1739
9470e4a6
JA
1740 if (cgd)
1741 val = cgd->weight;
1742
1743 seq_printf(sf, "%u\n", val);
60c2bc2d
TH
1744 return 0;
1745}
1746
2da8ca82 1747static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
e71357e1 1748{
e48453c3 1749 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
9470e4a6
JA
1750 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
1751 unsigned int val = 0;
1752
1753 if (cgd)
1754 val = cgd->leaf_weight;
e48453c3 1755
9470e4a6 1756 seq_printf(sf, "%u\n", val);
e71357e1
TH
1757 return 0;
1758}
1759
451af504
TH
1760static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
1761 char *buf, size_t nbytes, loff_t off,
2ee867dc 1762 bool on_dfl, bool is_leaf_weight)
60c2bc2d 1763{
69d7fde5
TH
1764 unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
1765 unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
451af504 1766 struct blkcg *blkcg = css_to_blkcg(of_css(of));
60c2bc2d 1767 struct blkg_conf_ctx ctx;
3381cb8d 1768 struct cfq_group *cfqg;
e48453c3 1769 struct cfq_group_data *cfqgd;
60c2bc2d 1770 int ret;
36aa9e5f 1771 u64 v;
60c2bc2d 1772
3c798398 1773 ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
60c2bc2d
TH
1774 if (ret)
1775 return ret;
1776
2ee867dc
TH
1777 if (sscanf(ctx.body, "%llu", &v) == 1) {
1778 /* require "default" on dfl */
1779 ret = -ERANGE;
1780 if (!v && on_dfl)
1781 goto out_finish;
1782 } else if (!strcmp(strim(ctx.body), "default")) {
1783 v = 0;
1784 } else {
1785 ret = -EINVAL;
36aa9e5f 1786 goto out_finish;
2ee867dc 1787 }
36aa9e5f 1788
3381cb8d 1789 cfqg = blkg_to_cfqg(ctx.blkg);
e48453c3 1790 cfqgd = blkcg_to_cfqgd(blkcg);
ae994ea9 1791
20386ce0 1792 ret = -ERANGE;
69d7fde5 1793 if (!v || (v >= min && v <= max)) {
e71357e1 1794 if (!is_leaf_weight) {
36aa9e5f
TH
1795 cfqg->dev_weight = v;
1796 cfqg->new_weight = v ?: cfqgd->weight;
e71357e1 1797 } else {
36aa9e5f
TH
1798 cfqg->dev_leaf_weight = v;
1799 cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
e71357e1 1800 }
60c2bc2d
TH
1801 ret = 0;
1802 }
36aa9e5f 1803out_finish:
60c2bc2d 1804 blkg_conf_finish(&ctx);
451af504 1805 return ret ?: nbytes;
60c2bc2d
TH
1806}
1807
451af504
TH
1808static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
1809 char *buf, size_t nbytes, loff_t off)
e71357e1 1810{
2ee867dc 1811 return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
e71357e1
TH
1812}
1813
451af504
TH
1814static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
1815 char *buf, size_t nbytes, loff_t off)
e71357e1 1816{
2ee867dc 1817 return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
e71357e1
TH
1818}
1819
static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
			    bool on_dfl, bool reset_dev, bool is_leaf_weight)
{
	unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
	unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	struct cfq_group_data *cfqgd;
	int ret = 0;

	if (val < min || val > max)
		return -ERANGE;

	spin_lock_irq(&blkcg->lock);
	cfqgd = blkcg_to_cfqgd(blkcg);
	if (!cfqgd) {
		ret = -EINVAL;
		goto out;
	}

	if (!is_leaf_weight)
		cfqgd->weight = val;
	else
		cfqgd->leaf_weight = val;

	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct cfq_group *cfqg = blkg_to_cfqg(blkg);

		if (!cfqg)
			continue;

		if (!is_leaf_weight) {
			if (reset_dev)
				cfqg->dev_weight = 0;
			if (!cfqg->dev_weight)
				cfqg->new_weight = cfqgd->weight;
		} else {
			if (reset_dev)
				cfqg->dev_leaf_weight = 0;
			if (!cfqg->dev_leaf_weight)
				cfqg->new_leaf_weight = cfqgd->leaf_weight;
		}
	}

out:
	spin_unlock_irq(&blkcg->lock);
	return ret;
}

static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
			  u64 val)
{
	return __cfq_set_weight(css, val, false, false, false);
}

static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
			       struct cftype *cft, u64 val)
{
	return __cfq_set_weight(css, val, false, false, true);
}
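
/*
 * Usage example (assumed typical setup, not taken from this file): on the
 * legacy cgroup hierarchy the two helpers above back the blkio.weight and
 * blkio.leaf_weight files, so writing
 *
 *	# echo 300 > /sys/fs/cgroup/blkio/mygroup/blkio.weight
 *
 * ends up in __cfq_set_weight(css, 300, false, false, false) and updates
 * new_weight on every group of that cgroup which has no per-device override
 * (dev_weight == 0).
 */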
1880
2da8ca82 1881static int cfqg_print_stat(struct seq_file *sf, void *v)
5bc4afb1 1882{
2da8ca82
TH
1883 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
1884 &blkcg_policy_cfq, seq_cft(sf)->private, false);
5bc4afb1
TH
1885 return 0;
1886}
1887
2da8ca82 1888static int cfqg_print_rwstat(struct seq_file *sf, void *v)
5bc4afb1 1889{
2da8ca82
TH
1890 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
1891 &blkcg_policy_cfq, seq_cft(sf)->private, true);
5bc4afb1
TH
1892 return 0;
1893}
1894
43114018
TH
1895static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
1896 struct blkg_policy_data *pd, int off)
1897{
f12c74ca
TH
1898 u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
1899 &blkcg_policy_cfq, off);
43114018
TH
1900 return __blkg_prfill_u64(sf, pd, sum);
1901}
1902
1903static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
1904 struct blkg_policy_data *pd, int off)
1905{
f12c74ca
TH
1906 struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
1907 &blkcg_policy_cfq, off);
43114018
TH
1908 return __blkg_prfill_rwstat(sf, pd, &sum);
1909}
1910
2da8ca82 1911static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
43114018 1912{
2da8ca82
TH
1913 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1914 cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
1915 seq_cft(sf)->private, false);
43114018
TH
1916 return 0;
1917}
1918
2da8ca82 1919static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
43114018 1920{
2da8ca82
TH
1921 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1922 cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
1923 seq_cft(sf)->private, true);
43114018
TH
1924 return 0;
1925}
1926
702747ca
TH
1927static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
1928 int off)
1929{
1930 u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
1931
1932 return __blkg_prfill_u64(sf, pd, sum >> 9);
1933}
1934
1935static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
1936{
1937 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1938 cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
1939 return 0;
1940}
1941
1942static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
1943 struct blkg_policy_data *pd, int off)
1944{
1945 struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
1946 offsetof(struct blkcg_gq, stat_bytes));
1947 u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
1948 atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
1949
1950 return __blkg_prfill_u64(sf, pd, sum >> 9);
1951}
1952
1953static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
1954{
1955 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1956 cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
1957 false);
1958 return 0;
1959}
1960
60c2bc2d 1961#ifdef CONFIG_DEBUG_BLK_CGROUP
f95a04af
TH
1962static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
1963 struct blkg_policy_data *pd, int off)
60c2bc2d 1964{
f95a04af 1965 struct cfq_group *cfqg = pd_to_cfqg(pd);
155fead9 1966 u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
60c2bc2d
TH
1967 u64 v = 0;
1968
1969 if (samples) {
155fead9 1970 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
f3cff25f 1971 v = div64_u64(v, samples);
60c2bc2d 1972 }
f95a04af 1973 __blkg_prfill_u64(sf, pd, v);
60c2bc2d
TH
1974 return 0;
1975}
1976
1977/* print avg_queue_size */
2da8ca82 1978static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
60c2bc2d 1979{
2da8ca82
TH
1980 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1981 cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
1982 0, false);
60c2bc2d
TH
1983 return 0;
1984}
1985#endif /* CONFIG_DEBUG_BLK_CGROUP */
1986
880f50e2 1987static struct cftype cfq_blkcg_legacy_files[] = {
1d3650f7 1988 /* on root, weight is mapped to leaf_weight */
60c2bc2d
TH
1989 {
1990 .name = "weight_device",
1d3650f7 1991 .flags = CFTYPE_ONLY_ON_ROOT,
2da8ca82 1992 .seq_show = cfqg_print_leaf_weight_device,
451af504 1993 .write = cfqg_set_leaf_weight_device,
60c2bc2d
TH
1994 },
1995 {
1996 .name = "weight",
1d3650f7 1997 .flags = CFTYPE_ONLY_ON_ROOT,
2da8ca82 1998 .seq_show = cfq_print_leaf_weight,
1d3650f7 1999 .write_u64 = cfq_set_leaf_weight,
60c2bc2d 2000 },
e71357e1 2001
1d3650f7 2002 /* no such mapping necessary for !roots */
60c2bc2d
TH
2003 {
2004 .name = "weight_device",
1d3650f7 2005 .flags = CFTYPE_NOT_ON_ROOT,
2da8ca82 2006 .seq_show = cfqg_print_weight_device,
451af504 2007 .write = cfqg_set_weight_device,
60c2bc2d
TH
2008 },
2009 {
2010 .name = "weight",
1d3650f7 2011 .flags = CFTYPE_NOT_ON_ROOT,
2da8ca82 2012 .seq_show = cfq_print_weight,
3381cb8d 2013 .write_u64 = cfq_set_weight,
60c2bc2d 2014 },
e71357e1 2015
e71357e1
TH
2016 {
2017 .name = "leaf_weight_device",
2da8ca82 2018 .seq_show = cfqg_print_leaf_weight_device,
451af504 2019 .write = cfqg_set_leaf_weight_device,
e71357e1
TH
2020 },
2021 {
2022 .name = "leaf_weight",
2da8ca82 2023 .seq_show = cfq_print_leaf_weight,
e71357e1
TH
2024 .write_u64 = cfq_set_leaf_weight,
2025 },
2026
43114018 2027 /* statistics, covers only the tasks in the cfqg */
60c2bc2d
TH
2028 {
2029 .name = "time",
5bc4afb1 2030 .private = offsetof(struct cfq_group, stats.time),
2da8ca82 2031 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2032 },
2033 {
2034 .name = "sectors",
702747ca 2035 .seq_show = cfqg_print_stat_sectors,
60c2bc2d
TH
2036 },
2037 {
2038 .name = "io_service_bytes",
77ea7338
TH
2039 .private = (unsigned long)&blkcg_policy_cfq,
2040 .seq_show = blkg_print_stat_bytes,
60c2bc2d
TH
2041 },
2042 {
2043 .name = "io_serviced",
77ea7338
TH
2044 .private = (unsigned long)&blkcg_policy_cfq,
2045 .seq_show = blkg_print_stat_ios,
60c2bc2d
TH
2046 },
2047 {
2048 .name = "io_service_time",
5bc4afb1 2049 .private = offsetof(struct cfq_group, stats.service_time),
2da8ca82 2050 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2051 },
2052 {
2053 .name = "io_wait_time",
5bc4afb1 2054 .private = offsetof(struct cfq_group, stats.wait_time),
2da8ca82 2055 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2056 },
2057 {
2058 .name = "io_merged",
5bc4afb1 2059 .private = offsetof(struct cfq_group, stats.merged),
2da8ca82 2060 .seq_show = cfqg_print_rwstat,
60c2bc2d
TH
2061 },
2062 {
2063 .name = "io_queued",
5bc4afb1 2064 .private = offsetof(struct cfq_group, stats.queued),
2da8ca82 2065 .seq_show = cfqg_print_rwstat,
60c2bc2d 2066 },
43114018
TH
2067
	/* the same statistics which cover the cfqg and its descendants */
2069 {
2070 .name = "time_recursive",
2071 .private = offsetof(struct cfq_group, stats.time),
2da8ca82 2072 .seq_show = cfqg_print_stat_recursive,
43114018
TH
2073 },
2074 {
2075 .name = "sectors_recursive",
702747ca 2076 .seq_show = cfqg_print_stat_sectors_recursive,
43114018
TH
2077 },
2078 {
2079 .name = "io_service_bytes_recursive",
77ea7338
TH
2080 .private = (unsigned long)&blkcg_policy_cfq,
2081 .seq_show = blkg_print_stat_bytes_recursive,
43114018
TH
2082 },
2083 {
2084 .name = "io_serviced_recursive",
77ea7338
TH
2085 .private = (unsigned long)&blkcg_policy_cfq,
2086 .seq_show = blkg_print_stat_ios_recursive,
43114018
TH
2087 },
2088 {
2089 .name = "io_service_time_recursive",
2090 .private = offsetof(struct cfq_group, stats.service_time),
2da8ca82 2091 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2092 },
2093 {
2094 .name = "io_wait_time_recursive",
2095 .private = offsetof(struct cfq_group, stats.wait_time),
2da8ca82 2096 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2097 },
2098 {
2099 .name = "io_merged_recursive",
2100 .private = offsetof(struct cfq_group, stats.merged),
2da8ca82 2101 .seq_show = cfqg_print_rwstat_recursive,
43114018
TH
2102 },
2103 {
2104 .name = "io_queued_recursive",
2105 .private = offsetof(struct cfq_group, stats.queued),
2da8ca82 2106 .seq_show = cfqg_print_rwstat_recursive,
43114018 2107 },
60c2bc2d
TH
2108#ifdef CONFIG_DEBUG_BLK_CGROUP
2109 {
2110 .name = "avg_queue_size",
2da8ca82 2111 .seq_show = cfqg_print_avg_queue_size,
60c2bc2d
TH
2112 },
2113 {
2114 .name = "group_wait_time",
5bc4afb1 2115 .private = offsetof(struct cfq_group, stats.group_wait_time),
2da8ca82 2116 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2117 },
2118 {
2119 .name = "idle_time",
5bc4afb1 2120 .private = offsetof(struct cfq_group, stats.idle_time),
2da8ca82 2121 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2122 },
2123 {
2124 .name = "empty_time",
5bc4afb1 2125 .private = offsetof(struct cfq_group, stats.empty_time),
2da8ca82 2126 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2127 },
2128 {
2129 .name = "dequeue",
5bc4afb1 2130 .private = offsetof(struct cfq_group, stats.dequeue),
2da8ca82 2131 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2132 },
2133 {
2134 .name = "unaccounted_time",
5bc4afb1 2135 .private = offsetof(struct cfq_group, stats.unaccounted_time),
2da8ca82 2136 .seq_show = cfqg_print_stat,
60c2bc2d
TH
2137 },
2138#endif /* CONFIG_DEBUG_BLK_CGROUP */
2139 { } /* terminate */
2140};
2ee867dc
TH
2141
2142static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
2143{
2144 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2145 struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
2146
2147 seq_printf(sf, "default %u\n", cgd->weight);
2148 blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
2149 &blkcg_policy_cfq, 0, false);
2150 return 0;
2151}
2152
2153static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
2154 char *buf, size_t nbytes, loff_t off)
2155{
2156 char *endp;
2157 int ret;
2158 u64 v;
2159
2160 buf = strim(buf);
2161
2162 /* "WEIGHT" or "default WEIGHT" sets the default weight */
2163 v = simple_strtoull(buf, &endp, 0);
2164 if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
69d7fde5 2165 ret = __cfq_set_weight(of_css(of), v, true, false, false);
2ee867dc
TH
2166 return ret ?: nbytes;
2167 }
2168
2169 /* "MAJ:MIN WEIGHT" */
2170 return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
2171}
2172
2173static struct cftype cfq_blkcg_files[] = {
2174 {
2175 .name = "weight",
2176 .flags = CFTYPE_NOT_ON_ROOT,
2177 .seq_show = cfq_print_weight_on_dfl,
2178 .write = cfq_set_weight_on_dfl,
2179 },
2180 { } /* terminate */
2181};
2182
#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
					 struct blkcg *blkcg)
{
	return cfqd->root_group;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
	cfqq->cfqg = cfqg;
}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	u64 rb_key;
	struct cfq_rb_root *st;
	bool leftmost = true;
	int new_cfqq = 1;
	u64 now = ktime_get_ns();

	st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = st->rb_rightmost;
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += now;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + now;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		rb_key = -NSEC_PER_SEC;
		__cfqq = cfq_rb_first(st);
		rb_key += __cfqq ? __cfqq->rb_key : now;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
			return;

		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	parent = NULL;
	cfqq->service_tree = st;
	p = &st->rb.rb_root.rb_node;
	while (*p) {
		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, that represents service time.
		 */
		if (rb_key < __cfqq->rb_key)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color_cached(&cfqq->rb_node, &st->rb, leftmost);
	st->count++;
	if (add_front || !new_cfqq)
		return;
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}

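/*
 * Worked example (illustrative numbers): for a non-idle-class queue queued
 * normally (add_front == false), rb_key = cfq_slice_offset(cfqd, cfqq) + now
 * - slice_resid. A queue expired with 30ms of its slice unused carries
 * slice_resid = +30ms, so its key moves 30ms earlier and it is serviced
 * sooner next round; a queue that overran its slice by 10ms carries
 * slice_resid = -10ms, pushing its key (and its next service) 10ms further
 * out in the tree.
 */
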
static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}

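/*
 * Note: there is one such prio tree per ioprio level (cfqd->prio_trees[]),
 * keyed by the sector of each queue's next_rq. cfqq_close() below searches
 * the tree for the current queue's ioprio for another queue whose next
 * request starts near cfqd->last_position; that is how close cooperators
 * are detected and, eventually, merged.
 */
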
/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}
2393
2394/*
2395 * rb tree support functions
2396 */
febffd61 2397static void cfq_del_rq_rb(struct request *rq)
1da177e4 2398{
5e705374 2399 struct cfq_queue *cfqq = RQ_CFQQ(rq);
5e705374 2400 const int sync = rq_is_sync(rq);
1da177e4 2401
b4878f24
JA
2402 BUG_ON(!cfqq->queued[sync]);
2403 cfqq->queued[sync]--;
1da177e4 2404
5e705374 2405 elv_rb_del(&cfqq->sort_list, rq);
1da177e4 2406
f04a6424
VG
2407 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
2408 /*
2409 * Queue will be deleted from service tree when we actually
2410 * expire it later. Right now just remove it from prio tree
2411 * as it is empty.
2412 */
2413 if (cfqq->p_root) {
2414 rb_erase(&cfqq->p_node, cfqq->p_root);
2415 cfqq->p_root = NULL;
2416 }
2417 }
1da177e4
LT
2418}
2419
5e705374 2420static void cfq_add_rq_rb(struct request *rq)
1da177e4 2421{
5e705374 2422 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 2423 struct cfq_data *cfqd = cfqq->cfqd;
796d5116 2424 struct request *prev;
1da177e4 2425
5380a101 2426 cfqq->queued[rq_is_sync(rq)]++;
1da177e4 2427
796d5116 2428 elv_rb_add(&cfqq->sort_list, rq);
5fccbf61
JA
2429
2430 if (!cfq_cfqq_on_rr(cfqq))
2431 cfq_add_cfqq_rr(cfqd, cfqq);
5044eed4
JA
2432
2433 /*
2434 * check if this request is a better next-serve candidate
2435 */
a36e71f9 2436 prev = cfqq->next_rq;
cf7c25cf 2437 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
a36e71f9
JA
2438
2439 /*
2440 * adjust priority tree position, if ->next_rq changes
2441 */
2442 if (prev != cfqq->next_rq)
2443 cfq_prio_tree_add(cfqd, cfqq);
2444
5044eed4 2445 BUG_ON(!cfqq->next_rq);
1da177e4
LT
2446}
2447
febffd61 2448static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1da177e4 2449{
5380a101
JA
2450 elv_rb_del(&cfqq->sort_list, rq);
2451 cfqq->queued[rq_is_sync(rq)]--;
ef295ecf 2452 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
5e705374 2453 cfq_add_rq_rb(rq);
155fead9 2454 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
ef295ecf 2455 rq->cmd_flags);
1da177e4
LT
2456}
2457
206dc69b
JA
2458static struct request *
2459cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1da177e4 2460{
206dc69b 2461 struct task_struct *tsk = current;
c5869807 2462 struct cfq_io_cq *cic;
206dc69b 2463 struct cfq_queue *cfqq;
1da177e4 2464
4ac845a2 2465 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
2466 if (!cic)
2467 return NULL;
2468
aa39ebd4 2469 cfqq = cic_to_cfqq(cic, op_is_sync(bio->bi_opf));
f73a1c7d
KO
2470 if (cfqq)
2471 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
1da177e4 2472
1da177e4
LT
2473 return NULL;
2474}
2475
165125e1 2476static void cfq_activate_request(struct request_queue *q, struct request *rq)
1da177e4 2477{
22e2c507 2478 struct cfq_data *cfqd = q->elevator->elevator_data;
3b18152c 2479
53c583d2 2480 cfqd->rq_in_driver++;
7b679138 2481 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
53c583d2 2482 cfqd->rq_in_driver);
25776e35 2483
5b93629b 2484 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1da177e4
LT
2485}
2486
165125e1 2487static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1da177e4 2488{
b4878f24
JA
2489 struct cfq_data *cfqd = q->elevator->elevator_data;
2490
53c583d2
CZ
2491 WARN_ON(!cfqd->rq_in_driver);
2492 cfqd->rq_in_driver--;
7b679138 2493 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
53c583d2 2494 cfqd->rq_in_driver);
1da177e4
LT
2495}
2496
b4878f24 2497static void cfq_remove_request(struct request *rq)
1da177e4 2498{
5e705374 2499 struct cfq_queue *cfqq = RQ_CFQQ(rq);
21183b07 2500
5e705374
JA
2501 if (cfqq->next_rq == rq)
2502 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1da177e4 2503
b4878f24 2504 list_del_init(&rq->queuelist);
5e705374 2505 cfq_del_rq_rb(rq);
374f84ac 2506
45333d5a 2507 cfqq->cfqd->rq_queued--;
ef295ecf 2508 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
65299a3b
CH
2509 if (rq->cmd_flags & REQ_PRIO) {
2510 WARN_ON(!cfqq->prio_pending);
2511 cfqq->prio_pending--;
b53d1ed7 2512 }
1da177e4
LT
2513}
2514
34fe7c05 2515static enum elv_merge cfq_merge(struct request_queue *q, struct request **req,
165125e1 2516 struct bio *bio)
1da177e4
LT
2517{
2518 struct cfq_data *cfqd = q->elevator->elevator_data;
2519 struct request *__rq;
1da177e4 2520
206dc69b 2521 __rq = cfq_find_rq_fmerge(cfqd, bio);
72ef799b 2522 if (__rq && elv_bio_merge_ok(__rq, bio)) {
9817064b
JA
2523 *req = __rq;
2524 return ELEVATOR_FRONT_MERGE;
1da177e4
LT
2525 }
2526
2527 return ELEVATOR_NO_MERGE;
1da177e4
LT
2528}
2529
165125e1 2530static void cfq_merged_request(struct request_queue *q, struct request *req,
34fe7c05 2531 enum elv_merge type)
1da177e4 2532{
21183b07 2533 if (type == ELEVATOR_FRONT_MERGE) {
5e705374 2534 struct cfq_queue *cfqq = RQ_CFQQ(req);
1da177e4 2535
5e705374 2536 cfq_reposition_rq_rb(cfqq, req);
1da177e4 2537 }
1da177e4
LT
2538}
2539
812d4026
DS
2540static void cfq_bio_merged(struct request_queue *q, struct request *req,
2541 struct bio *bio)
2542{
ef295ecf 2543 cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
812d4026
DS
2544}
2545
1da177e4 2546static void
165125e1 2547cfq_merged_requests(struct request_queue *q, struct request *rq,
1da177e4
LT
2548 struct request *next)
2549{
cf7c25cf 2550 struct cfq_queue *cfqq = RQ_CFQQ(rq);
4a0b75c7
SL
2551 struct cfq_data *cfqd = q->elevator->elevator_data;
2552
22e2c507
JA
2553 /*
2554 * reposition in fifo if next is older than rq
2555 */
2556 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
9a7f38c4 2557 next->fifo_time < rq->fifo_time &&
3d106fba 2558 cfqq == RQ_CFQQ(next)) {
22e2c507 2559 list_move(&rq->queuelist, &next->queuelist);
8b4922d3 2560 rq->fifo_time = next->fifo_time;
30996f40 2561 }
22e2c507 2562
cf7c25cf
CZ
2563 if (cfqq->next_rq == next)
2564 cfqq->next_rq = rq;
b4878f24 2565 cfq_remove_request(next);
ef295ecf 2566 cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
4a0b75c7
SL
2567
2568 cfqq = RQ_CFQQ(next);
2569 /*
2570 * all requests of this queue are merged to other queues, delete it
2571 * from the service tree. If it's the active_queue,
2572 * cfq_dispatch_requests() will choose to expire it or do idle
2573 */
2574 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
2575 cfqq != cfqd->active_queue)
2576 cfq_del_cfqq_rr(cfqd, cfqq);
22e2c507
JA
2577}
2578
72ef799b
TE
2579static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
2580 struct bio *bio)
da775265
JA
2581{
2582 struct cfq_data *cfqd = q->elevator->elevator_data;
aa39ebd4 2583 bool is_sync = op_is_sync(bio->bi_opf);
c5869807 2584 struct cfq_io_cq *cic;
da775265 2585 struct cfq_queue *cfqq;
da775265
JA
2586
2587 /*
ec8acb69 2588 * Disallow merge of a sync bio into an async request.
da775265 2589 */
aa39ebd4 2590 if (is_sync && !rq_is_sync(rq))
a6151c3a 2591 return false;
da775265
JA
2592
2593 /*
f1a4f4d3 2594 * Lookup the cfqq that this bio will be queued with and allow
07c2bd37 2595 * merge only if rq is queued there.
f1a4f4d3 2596 */
07c2bd37
TH
2597 cic = cfq_cic_lookup(cfqd, current->io_context);
2598 if (!cic)
2599 return false;
719d3402 2600
aa39ebd4 2601 cfqq = cic_to_cfqq(cic, is_sync);
a6151c3a 2602 return cfqq == RQ_CFQQ(rq);
da775265
JA
2603}
2604
72ef799b
TE
2605static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq,
2606 struct request *next)
2607{
2608 return RQ_CFQQ(rq) == RQ_CFQQ(next);
2609}
2610
812df48d
DS
2611static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2612{
91148325 2613 hrtimer_try_to_cancel(&cfqd->idle_slice_timer);
155fead9 2614 cfqg_stats_update_idle_time(cfqq->cfqg);
812df48d
DS
2615}
2616
febffd61
JA
2617static void __cfq_set_active_queue(struct cfq_data *cfqd,
2618 struct cfq_queue *cfqq)
22e2c507
JA
2619{
2620 if (cfqq) {
3bf10fea 2621 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
4d2ceea4 2622 cfqd->serving_wl_class, cfqd->serving_wl_type);
155fead9 2623 cfqg_stats_update_avg_queue_size(cfqq->cfqg);
62a37f6b 2624 cfqq->slice_start = 0;
9a7f38c4 2625 cfqq->dispatch_start = ktime_get_ns();
62a37f6b
JT
2626 cfqq->allocated_slice = 0;
2627 cfqq->slice_end = 0;
2628 cfqq->slice_dispatch = 0;
2629 cfqq->nr_sectors = 0;
2630
2631 cfq_clear_cfqq_wait_request(cfqq);
2632 cfq_clear_cfqq_must_dispatch(cfqq);
2633 cfq_clear_cfqq_must_alloc_slice(cfqq);
2634 cfq_clear_cfqq_fifo_expire(cfqq);
2635 cfq_mark_cfqq_slice_new(cfqq);
2636
2637 cfq_del_timer(cfqd, cfqq);
22e2c507
JA
2638 }
2639
2640 cfqd->active_queue = cfqq;
2641}
2642
7b14e3b5
JA
2643/*
2644 * current cfqq expired its slice (or was too idle), select new one
2645 */
2646static void
2647__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e5ff082e 2648 bool timed_out)
7b14e3b5 2649{
7b679138
JA
2650 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
2651
7b14e3b5 2652 if (cfq_cfqq_wait_request(cfqq))
812df48d 2653 cfq_del_timer(cfqd, cfqq);
7b14e3b5 2654
7b14e3b5 2655 cfq_clear_cfqq_wait_request(cfqq);
f75edf2d 2656 cfq_clear_cfqq_wait_busy(cfqq);
7b14e3b5 2657
ae54abed
SL
2658 /*
2659 * If this cfqq is shared between multiple processes, check to
2660 * make sure that those processes are still issuing I/Os within
2661 * the mean seek distance. If not, it may be time to break the
2662 * queues apart again.
2663 */
2664 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
2665 cfq_mark_cfqq_split_coop(cfqq);
2666
7b14e3b5 2667 /*
6084cdda 2668 * store what was left of this slice, if the queue idled/timed out
7b14e3b5 2669 */
c553f8e3
SL
2670 if (timed_out) {
2671 if (cfq_cfqq_slice_new(cfqq))
ba5bd520 2672 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
c553f8e3 2673 else
9a7f38c4 2674 cfqq->slice_resid = cfqq->slice_end - ktime_get_ns();
93fdf147 2675 cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid);
7b679138 2676 }
7b14e3b5 2677
e5ff082e 2678 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
dae739eb 2679
f04a6424
VG
2680 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
2681 cfq_del_cfqq_rr(cfqd, cfqq);
2682
edd75ffd 2683 cfq_resort_rr_list(cfqd, cfqq);
7b14e3b5
JA
2684
2685 if (cfqq == cfqd->active_queue)
2686 cfqd->active_queue = NULL;
2687
2688 if (cfqd->active_cic) {
11a3122f 2689 put_io_context(cfqd->active_cic->icq.ioc);
7b14e3b5
JA
2690 cfqd->active_cic = NULL;
2691 }
7b14e3b5
JA
2692}
2693
e5ff082e 2694static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
7b14e3b5
JA
2695{
2696 struct cfq_queue *cfqq = cfqd->active_queue;
2697
2698 if (cfqq)
e5ff082e 2699 __cfq_slice_expired(cfqd, cfqq, timed_out);
7b14e3b5
JA
2700}
2701
498d3aa2
JA
2702/*
2703 * Get next queue for service. Unless we have a queue preemption,
2704 * we'll simply select the first cfqq in the service tree.
2705 */
6d048f53 2706static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
22e2c507 2707{
34b98d03
VG
2708 struct cfq_rb_root *st = st_for(cfqd->serving_group,
2709 cfqd->serving_wl_class, cfqd->serving_wl_type);
d9e7620e 2710
f04a6424
VG
2711 if (!cfqd->rq_queued)
2712 return NULL;
2713
1fa8f6d6 2714 /* There is nothing to dispatch */
34b98d03 2715 if (!st)
1fa8f6d6 2716 return NULL;
09663c86 2717 if (RB_EMPTY_ROOT(&st->rb.rb_root))
c0324a02 2718 return NULL;
34b98d03 2719 return cfq_rb_first(st);
6d048f53
JA
2720}
2721
f04a6424
VG
2722static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
2723{
25fb5169 2724 struct cfq_group *cfqg;
f04a6424
VG
2725 struct cfq_queue *cfqq;
2726 int i, j;
2727 struct cfq_rb_root *st;
2728
2729 if (!cfqd->rq_queued)
2730 return NULL;
2731
25fb5169
VG
2732 cfqg = cfq_get_next_cfqg(cfqd);
2733 if (!cfqg)
2734 return NULL;
2735
1cf41753
ME
2736 for_each_cfqg_st(cfqg, i, j, st) {
2737 cfqq = cfq_rb_first(st);
2738 if (cfqq)
f04a6424 2739 return cfqq;
1cf41753 2740 }
f04a6424
VG
2741 return NULL;
2742}
2743
498d3aa2
JA
2744/*
2745 * Get and set a new active queue for service.
2746 */
a36e71f9
JA
2747static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
2748 struct cfq_queue *cfqq)
6d048f53 2749{
e00ef799 2750 if (!cfqq)
a36e71f9 2751 cfqq = cfq_get_next_queue(cfqd);
6d048f53 2752
22e2c507 2753 __cfq_set_active_queue(cfqd, cfqq);
3b18152c 2754 return cfqq;
22e2c507
JA
2755}
2756
d9e7620e
JA
2757static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
2758 struct request *rq)
2759{
83096ebf
TH
2760 if (blk_rq_pos(rq) >= cfqd->last_position)
2761 return blk_rq_pos(rq) - cfqd->last_position;
d9e7620e 2762 else
83096ebf 2763 return cfqd->last_position - blk_rq_pos(rq);
d9e7620e
JA
2764}
2765
b2c18e1e 2766static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
e9ce335d 2767 struct request *rq)
6d048f53 2768{
e9ce335d 2769 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
6d048f53
JA
2770}
2771
a36e71f9
JA
2772static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
2773 struct cfq_queue *cur_cfqq)
2774{
f2d1f0ae 2775 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
a36e71f9
JA
2776 struct rb_node *parent, *node;
2777 struct cfq_queue *__cfqq;
2778 sector_t sector = cfqd->last_position;
2779
2780 if (RB_EMPTY_ROOT(root))
2781 return NULL;
2782
2783 /*
2784 * First, if we find a request starting at the end of the last
2785 * request, choose it.
2786 */
f2d1f0ae 2787 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
a36e71f9
JA
2788 if (__cfqq)
2789 return __cfqq;
2790
2791 /*
2792 * If the exact sector wasn't found, the parent of the NULL leaf
2793 * will contain the closest sector.
2794 */
2795 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
e9ce335d 2796 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
2797 return __cfqq;
2798
2e46e8b2 2799 if (blk_rq_pos(__cfqq->next_rq) < sector)
a36e71f9
JA
2800 node = rb_next(&__cfqq->p_node);
2801 else
2802 node = rb_prev(&__cfqq->p_node);
2803 if (!node)
2804 return NULL;
2805
2806 __cfqq = rb_entry(node, struct cfq_queue, p_node);
e9ce335d 2807 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
a36e71f9
JA
2808 return __cfqq;
2809
2810 return NULL;
2811}
2812
2813/*
2814 * cfqd - obvious
2815 * cur_cfqq - passed in so that we don't decide that the current queue is
2816 * closely cooperating with itself.
2817 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
2819 * one request, and that cfqd->last_position reflects a position on the disk
2820 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
2821 * assumption.
2822 */
2823static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
b3b6d040 2824 struct cfq_queue *cur_cfqq)
6d048f53 2825{
a36e71f9
JA
2826 struct cfq_queue *cfqq;
2827
39c01b21
DS
2828 if (cfq_class_idle(cur_cfqq))
2829 return NULL;
e6c5bc73
JM
2830 if (!cfq_cfqq_sync(cur_cfqq))
2831 return NULL;
2832 if (CFQQ_SEEKY(cur_cfqq))
2833 return NULL;
2834
b9d8f4c7
GJ
2835 /*
2836 * Don't search priority tree if it's the only queue in the group.
2837 */
2838 if (cur_cfqq->cfqg->nr_cfqq == 1)
2839 return NULL;
2840
6d048f53 2841 /*
d9e7620e
JA
2842 * We should notice if some of the queues are cooperating, eg
2843 * working closely on the same area of the disk. In that case,
2844 * we can group them together and don't waste time idling.
6d048f53 2845 */
a36e71f9
JA
2846 cfqq = cfqq_close(cfqd, cur_cfqq);
2847 if (!cfqq)
2848 return NULL;
2849
8682e1f1
VG
2850 /* If new queue belongs to different cfq_group, don't choose it */
2851 if (cur_cfqq->cfqg != cfqq->cfqg)
2852 return NULL;
2853
df5fe3e8
JM
2854 /*
2855 * It only makes sense to merge sync queues.
2856 */
2857 if (!cfq_cfqq_sync(cfqq))
2858 return NULL;
e6c5bc73
JM
2859 if (CFQQ_SEEKY(cfqq))
2860 return NULL;
df5fe3e8 2861
c0324a02
CZ
2862 /*
2863 * Do not merge queues of different priority classes
2864 */
2865 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
2866 return NULL;
2867
a36e71f9 2868 return cfqq;
6d048f53
JA
2869}
2870
/*
 * Determine whether we should enforce idle window for this queue.
 */

static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_class_t wl_class = cfqq_class(cfqq);
	struct cfq_rb_root *st = cfqq->service_tree;

	BUG_ON(!st);
	BUG_ON(!st->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (wl_class == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
	   !cfq_io_thinktime_big(cfqd, &st->ttime, false))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
	return false;
}
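
/*
 * Example of the decision above: a sync queue with the idle-window flag set
 * idles on a rotational device; on a non-rotational device with queueing
 * (blk_queue_nonrot() && hw_tag) that shortcut is skipped, and we only idle
 * when the queue is the last one on its service tree and its recent think
 * time is small (cfq_io_thinktime_big() returns false).
 */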
2905
6d048f53 2906static void cfq_arm_slice_timer(struct cfq_data *cfqd)
22e2c507 2907{
1792669c 2908 struct cfq_queue *cfqq = cfqd->active_queue;
e795421e 2909 struct cfq_rb_root *st = cfqq->service_tree;
c5869807 2910 struct cfq_io_cq *cic;
9a7f38c4
JM
2911 u64 sl, group_idle = 0;
2912 u64 now = ktime_get_ns();
7b14e3b5 2913
a68bbddb 2914 /*
f7d7b7a7
JA
2915 * SSD device without seek penalty, disable idling. But only do so
2916 * for devices that support queuing, otherwise we still have a problem
2917 * with sync vs async workloads.
a68bbddb 2918 */
b3193bc0
RH
2919 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
2920 !cfqd->cfq_group_idle)
a68bbddb
JA
2921 return;
2922
dd67d051 2923 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
6d048f53 2924 WARN_ON(cfq_cfqq_slice_new(cfqq));
22e2c507
JA
2925
2926 /*
2927 * idle is disabled, either manually or by past process history
2928 */
80bdf0c7
VG
2929 if (!cfq_should_idle(cfqd, cfqq)) {
2930 /* no queue idling. Check for group idling */
2931 if (cfqd->cfq_group_idle)
2932 group_idle = cfqd->cfq_group_idle;
2933 else
2934 return;
2935 }
6d048f53 2936
7b679138 2937 /*
8e550632 2938 * still active requests from this queue, don't idle
7b679138 2939 */
8e550632 2940 if (cfqq->dispatched)
7b679138
JA
2941 return;
2942
22e2c507
JA
2943 /*
2944 * task has exited, don't wait
2945 */
206dc69b 2946 cic = cfqd->active_cic;
f6e8d01b 2947 if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
6d048f53
JA
2948 return;
2949
355b659c
CZ
2950 /*
2951 * If our average think time is larger than the remaining time
2952 * slice, then don't idle. This avoids overrunning the allotted
2953 * time slice.
2954 */
383cd721 2955 if (sample_valid(cic->ttime.ttime_samples) &&
9a7f38c4
JM
2956 (cfqq->slice_end - now < cic->ttime.ttime_mean)) {
2957 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu",
383cd721 2958 cic->ttime.ttime_mean);
355b659c 2959 return;
b1ffe737 2960 }
355b659c 2961
e795421e
JK
2962 /*
2963 * There are other queues in the group or this is the only group and
2964 * it has too big thinktime, don't do group idle.
2965 */
2966 if (group_idle &&
2967 (cfqq->cfqg->nr_cfqq > 1 ||
2968 cfq_io_thinktime_big(cfqd, &st->ttime, true)))
80bdf0c7
VG
2969 return;
2970
3b18152c 2971 cfq_mark_cfqq_wait_request(cfqq);
22e2c507 2972
80bdf0c7
VG
2973 if (group_idle)
2974 sl = cfqd->cfq_group_idle;
2975 else
2976 sl = cfqd->cfq_slice_idle;
206dc69b 2977
91148325
JK
2978 hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
2979 HRTIMER_MODE_REL);
155fead9 2980 cfqg_stats_set_start_idle_time(cfqq->cfqg);
9a7f38c4 2981 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl,
80bdf0c7 2982 group_idle ? 1 : 0);
1da177e4
LT
2983}
2984
498d3aa2
JA
2985/*
2986 * Move request from internal lists to the request queue dispatch list.
2987 */
165125e1 2988static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1da177e4 2989{
3ed9a296 2990 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 2991 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 2992
7b679138
JA
2993 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2994
06d21886 2995 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
5380a101 2996 cfq_remove_request(rq);
6d048f53 2997 cfqq->dispatched++;
80bdf0c7 2998 (RQ_CFQG(rq))->dispatched++;
5380a101 2999 elv_dispatch_sort(q, rq);
3ed9a296 3000
53c583d2 3001 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
c4e7893e 3002 cfqq->nr_sectors += blk_rq_sectors(rq);
1da177e4
LT
3003}
3004
3005/*
3006 * return expired entry, or NULL to just start from scratch in rbtree
3007 */
febffd61 3008static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
1da177e4 3009{
30996f40 3010 struct request *rq = NULL;
1da177e4 3011
3b18152c 3012 if (cfq_cfqq_fifo_expire(cfqq))
1da177e4 3013 return NULL;
cb887411
JA
3014
3015 cfq_mark_cfqq_fifo_expire(cfqq);
3016
89850f7e
JA
3017 if (list_empty(&cfqq->fifo))
3018 return NULL;
1da177e4 3019
89850f7e 3020 rq = rq_entry_fifo(cfqq->fifo.next);
9a7f38c4 3021 if (ktime_get_ns() < rq->fifo_time)
7b679138 3022 rq = NULL;
1da177e4 3023
6d048f53 3024 return rq;
1da177e4
LT
3025}
3026
22e2c507
JA
3027static inline int
3028cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3029{
3030 const int base_rq = cfqd->cfq_slice_async_rq;
1da177e4 3031
22e2c507 3032 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
1da177e4 3033
b9f8ce05 3034 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
1da177e4
LT
3035}
3036
df5fe3e8
JM
3037/*
3038 * Must be called with the queue_lock held.
3039 */
3040static int cfqq_process_refs(struct cfq_queue *cfqq)
3041{
3042 int process_refs, io_refs;
3043
3044 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
30d7b944 3045 process_refs = cfqq->ref - io_refs;
df5fe3e8
JM
3046 BUG_ON(process_refs < 0);
3047 return process_refs;
3048}
3049
3050static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
3051{
e6c5bc73 3052 int process_refs, new_process_refs;
df5fe3e8
JM
3053 struct cfq_queue *__cfqq;
3054
c10b61f0
JM
3055 /*
3056 * If there are no process references on the new_cfqq, then it is
3057 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
3058 * chain may have dropped their last reference (not just their
3059 * last process reference).
3060 */
3061 if (!cfqq_process_refs(new_cfqq))
3062 return;
3063
df5fe3e8
JM
3064 /* Avoid a circular list and skip interim queue merges */
3065 while ((__cfqq = new_cfqq->new_cfqq)) {
3066 if (__cfqq == cfqq)
3067 return;
3068 new_cfqq = __cfqq;
3069 }
3070
3071 process_refs = cfqq_process_refs(cfqq);
c10b61f0 3072 new_process_refs = cfqq_process_refs(new_cfqq);
df5fe3e8
JM
3073 /*
3074 * If the process for the cfqq has gone away, there is no
3075 * sense in merging the queues.
3076 */
c10b61f0 3077 if (process_refs == 0 || new_process_refs == 0)
df5fe3e8
JM
3078 return;
3079
e6c5bc73
JM
3080 /*
3081 * Merge in the direction of the lesser amount of work.
3082 */
e6c5bc73
JM
3083 if (new_process_refs >= process_refs) {
3084 cfqq->new_cfqq = new_cfqq;
30d7b944 3085 new_cfqq->ref += process_refs;
e6c5bc73
JM
3086 } else {
3087 new_cfqq->new_cfqq = cfqq;
30d7b944 3088 cfqq->ref += new_process_refs;
e6c5bc73 3089 }
df5fe3e8
JM
3090}
3091
6d816ec7 3092static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
3bf10fea 3093 struct cfq_group *cfqg, enum wl_class_t wl_class)
718eee05
CZ
3094{
3095 struct cfq_queue *queue;
3096 int i;
3097 bool key_valid = false;
9a7f38c4 3098 u64 lowest_key = 0;
718eee05
CZ
3099 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
3100
65b32a57
VG
3101 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
3102 /* select the one with lowest rb_key */
34b98d03 3103 queue = cfq_rb_first(st_for(cfqg, wl_class, i));
718eee05 3104 if (queue &&
9a7f38c4 3105 (!key_valid || queue->rb_key < lowest_key)) {
718eee05
CZ
3106 lowest_key = queue->rb_key;
3107 cur_best = i;
3108 key_valid = true;
3109 }
3110 }
3111
3112 return cur_best;
3113}
3114
6d816ec7
VG
3115static void
3116choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
718eee05 3117{
9a7f38c4 3118 u64 slice;
718eee05 3119 unsigned count;
cdb16e8f 3120 struct cfq_rb_root *st;
9a7f38c4 3121 u64 group_slice;
4d2ceea4 3122 enum wl_class_t original_class = cfqd->serving_wl_class;
9a7f38c4 3123 u64 now = ktime_get_ns();
1fa8f6d6 3124
718eee05 3125 /* Choose next priority. RT > BE > IDLE */
58ff82f3 3126 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
4d2ceea4 3127 cfqd->serving_wl_class = RT_WORKLOAD;
58ff82f3 3128 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
4d2ceea4 3129 cfqd->serving_wl_class = BE_WORKLOAD;
718eee05 3130 else {
4d2ceea4 3131 cfqd->serving_wl_class = IDLE_WORKLOAD;
9a7f38c4 3132 cfqd->workload_expires = now + jiffies_to_nsecs(1);
718eee05
CZ
3133 return;
3134 }
3135
4d2ceea4 3136 if (original_class != cfqd->serving_wl_class)
e4ea0c16
SL
3137 goto new_workload;
3138
718eee05
CZ
3139 /*
3140 * For RT and BE, we have to choose also the type
3141 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
3142 * expiration time
3143 */
34b98d03 3144 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
cdb16e8f 3145 count = st->count;
718eee05
CZ
3146
3147 /*
65b32a57 3148 * check workload expiration, and that we still have other queues ready
718eee05 3149 */
9a7f38c4 3150 if (count && !(now > cfqd->workload_expires))
718eee05
CZ
3151 return;
3152
e4ea0c16 3153new_workload:
718eee05 3154 /* otherwise select new workload type */
6d816ec7 3155 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
4d2ceea4 3156 cfqd->serving_wl_class);
34b98d03 3157 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
cdb16e8f 3158 count = st->count;
718eee05
CZ
3159
3160 /*
3161 * the workload slice is computed as a fraction of target latency
3162 * proportional to the number of queues in that workload, over
3163 * all the queues in the same priority class
3164 */
58ff82f3
VG
3165 group_slice = cfq_group_slice(cfqd, cfqg);
3166
9a7f38c4 3167 slice = div_u64(group_slice * count,
4d2ceea4
VG
3168 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
3169 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
9a7f38c4 3170 cfqg)));
718eee05 3171
4d2ceea4 3172 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
9a7f38c4 3173 u64 tmp;
f26bd1f0
VG
3174
3175 /*
3176 * Async queues are currently system wide. Just taking
3177 * proportion of queues with-in same group will lead to higher
3178 * async ratio system wide as generally root group is going
3179 * to have higher weight. A more accurate thing would be to
3180 * calculate system wide asnc/sync ratio.
3181 */
5bf14c07
TM
3182 tmp = cfqd->cfq_target_latency *
3183 cfqg_busy_async_queues(cfqd, cfqg);
9a7f38c4
JM
3184 tmp = div_u64(tmp, cfqd->busy_queues);
3185 slice = min_t(u64, slice, tmp);
f26bd1f0 3186
718eee05
CZ
3187 /* async workload slice is scaled down according to
3188 * the sync/async slice ratio. */
9a7f38c4 3189 slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]);
f26bd1f0 3190 } else
718eee05
CZ
3191 /* sync workload slice is at least 2 * cfq_slice_idle */
3192 slice = max(slice, 2 * cfqd->cfq_slice_idle);
3193
9a7f38c4
JM
3194 slice = max_t(u64, slice, CFQ_MIN_TT);
3195 cfq_log(cfqd, "workload slice:%llu", slice);
3196 cfqd->workload_expires = now + slice;
718eee05
CZ
3197}
3198
1fa8f6d6
VG
3199static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
3200{
3201 struct cfq_rb_root *st = &cfqd->grp_service_tree;
25bc6b07 3202 struct cfq_group *cfqg;
1fa8f6d6 3203
09663c86 3204 if (RB_EMPTY_ROOT(&st->rb.rb_root))
1fa8f6d6 3205 return NULL;
25bc6b07 3206 cfqg = cfq_rb_first_group(st);
25bc6b07
VG
3207 update_min_vdisktime(st);
3208 return cfqg;
1fa8f6d6
VG
3209}
3210
cdb16e8f
VG
3211static void cfq_choose_cfqg(struct cfq_data *cfqd)
3212{
1fa8f6d6 3213 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
9a7f38c4 3214 u64 now = ktime_get_ns();
1fa8f6d6
VG
3215
3216 cfqd->serving_group = cfqg;
dae739eb
VG
3217
3218 /* Restore the workload type data */
4d2ceea4 3219 if (cfqg->saved_wl_slice) {
9a7f38c4 3220 cfqd->workload_expires = now + cfqg->saved_wl_slice;
4d2ceea4
VG
3221 cfqd->serving_wl_type = cfqg->saved_wl_type;
3222 cfqd->serving_wl_class = cfqg->saved_wl_class;
66ae2919 3223 } else
9a7f38c4 3224 cfqd->workload_expires = now - 1;
66ae2919 3225
6d816ec7 3226 choose_wl_class_and_type(cfqd, cfqg);
cdb16e8f
VG
3227}
3228
22e2c507 3229/*
498d3aa2
JA
3230 * Select a queue for service. If we have a current active queue,
3231 * check whether to continue servicing it, or retrieve and set a new one.
22e2c507 3232 */
1b5ed5e1 3233static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
1da177e4 3234{
a36e71f9 3235 struct cfq_queue *cfqq, *new_cfqq = NULL;
9a7f38c4 3236 u64 now = ktime_get_ns();
1da177e4 3237
22e2c507
JA
3238 cfqq = cfqd->active_queue;
3239 if (!cfqq)
3240 goto new_queue;
1da177e4 3241
f04a6424
VG
3242 if (!cfqd->rq_queued)
3243 return NULL;
c244bb50
VG
3244
3245 /*
3246 * We were waiting for group to get backlogged. Expire the queue
3247 */
3248 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
3249 goto expire;
3250
22e2c507 3251 /*
6d048f53 3252 * The active queue has run out of time, expire it and select new.
22e2c507 3253 */
7667aa06
VG
3254 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
3255 /*
3256 * If slice had not expired at the completion of last request
3257 * we might not have turned on wait_busy flag. Don't expire
3258 * the queue yet. Allow the group to get backlogged.
3259 *
3260 * The very fact that we have used the slice, that means we
3261 * have been idling all along on this queue and it should be
3262 * ok to wait for this request to complete.
3263 */
82bbbf28
VG
3264 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
3265 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3266 cfqq = NULL;
7667aa06 3267 goto keep_queue;
82bbbf28 3268 } else
80bdf0c7 3269 goto check_group_idle;
7667aa06 3270 }
1da177e4 3271
22e2c507 3272 /*
6d048f53
JA
3273 * The active queue has requests and isn't expired, allow it to
3274 * dispatch.
22e2c507 3275 */
dd67d051 3276 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 3277 goto keep_queue;
6d048f53 3278
a36e71f9
JA
3279 /*
3280 * If another queue has a request waiting within our mean seek
3281 * distance, let it run. The expire code will check for close
3282 * cooperators and put the close queue at the front of the service
df5fe3e8 3283 * tree. If possible, merge the expiring queue with the new cfqq.
a36e71f9 3284 */
b3b6d040 3285 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
df5fe3e8
JM
3286 if (new_cfqq) {
3287 if (!cfqq->new_cfqq)
3288 cfq_setup_merge(cfqq, new_cfqq);
a36e71f9 3289 goto expire;
df5fe3e8 3290 }
a36e71f9 3291
6d048f53
JA
3292 /*
3293 * No requests pending. If the active queue still has requests in
3294 * flight or is idling for a new request, allow either of these
3295 * conditions to happen (or time out) before selecting a new queue.
3296 */
91148325 3297 if (hrtimer_active(&cfqd->idle_slice_timer)) {
80bdf0c7
VG
3298 cfqq = NULL;
3299 goto keep_queue;
3300 }
3301
8e1ac665
SL
3302 /*
 3303 * This is a deep seek queue, but the device is much faster than
 3304 * the queue can deliver; don't idle.
 3305 */
3306 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
3307 (cfq_cfqq_slice_new(cfqq) ||
9a7f38c4 3308 (cfqq->slice_end - now > now - cfqq->slice_start))) {
8e1ac665
SL
3309 cfq_clear_cfqq_deep(cfqq);
3310 cfq_clear_cfqq_idle_window(cfqq);
3311 }
3312
80bdf0c7
VG
3313 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
3314 cfqq = NULL;
3315 goto keep_queue;
3316 }
3317
3318 /*
3319 * If group idle is enabled and there are requests dispatched from
3320 * this group, wait for requests to complete.
3321 */
3322check_group_idle:
7700fc4f
SL
3323 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
3324 cfqq->cfqg->dispatched &&
3325 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
caaa5f9f
JA
3326 cfqq = NULL;
3327 goto keep_queue;
22e2c507
JA
3328 }
3329
3b18152c 3330expire:
e5ff082e 3331 cfq_slice_expired(cfqd, 0);
3b18152c 3332new_queue:
718eee05
CZ
3333 /*
3334 * Current queue expired. Check if we have to switch to a new
3335 * service tree
3336 */
3337 if (!new_cfqq)
cdb16e8f 3338 cfq_choose_cfqg(cfqd);
718eee05 3339
a36e71f9 3340 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
22e2c507 3341keep_queue:
3b18152c 3342 return cfqq;
22e2c507
JA
3343}
3344
febffd61 3345static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
d9e7620e
JA
3346{
3347 int dispatched = 0;
3348
3349 while (cfqq->next_rq) {
3350 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
3351 dispatched++;
3352 }
3353
3354 BUG_ON(!list_empty(&cfqq->fifo));
f04a6424
VG
3355
3356 /* By default cfqq is not expired if it is empty. Do it explicitly */
e5ff082e 3357 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
d9e7620e
JA
3358 return dispatched;
3359}
3360
498d3aa2
JA
3361/*
3362 * Drain our current requests. Used for barriers and when switching
3363 * io schedulers on-the-fly.
3364 */
d9e7620e 3365static int cfq_forced_dispatch(struct cfq_data *cfqd)
1b5ed5e1 3366{
0871714e 3367 struct cfq_queue *cfqq;
d9e7620e 3368 int dispatched = 0;
cdb16e8f 3369
3440c49f 3370 /* Expire the timeslice of the current active queue first */
e5ff082e 3371 cfq_slice_expired(cfqd, 0);
3440c49f
DS
3372 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
3373 __cfq_set_active_queue(cfqd, cfqq);
f04a6424 3374 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
3440c49f 3375 }
1b5ed5e1 3376
1b5ed5e1
TH
3377 BUG_ON(cfqd->busy_queues);
3378
6923715a 3379 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
1b5ed5e1
TH
3380 return dispatched;
3381}
3382
abc3c744
SL
3383static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
3384 struct cfq_queue *cfqq)
3385{
9a7f38c4
JM
3386 u64 now = ktime_get_ns();
3387
abc3c744
SL
3388 /* the queue hasn't finished any request, can't estimate */
3389 if (cfq_cfqq_slice_new(cfqq))
c1e44756 3390 return true;
9a7f38c4 3391 if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end)
c1e44756 3392 return true;
abc3c744 3393
c1e44756 3394 return false;
abc3c744
SL
3395}
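
/*
 * A rough worked example of the estimate above: with the default 8ms
 * slice_idle and 4 requests already dispatched, the slice is considered
 * "used soon" unless at least ~32ms of it remain.  The per-request cost
 * here is only a guess based on slice_idle, not a measured service time.
 */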
3396
0b182d61 3397static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2f5cb738 3398{
2f5cb738 3399 unsigned int max_dispatch;
22e2c507 3400
3932a86b
GC
3401 if (cfq_cfqq_must_dispatch(cfqq))
3402 return true;
3403
5ad531db
JA
3404 /*
3405 * Drain async requests before we start sync IO
3406 */
53c583d2 3407 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
0b182d61 3408 return false;
5ad531db 3409
2f5cb738
JA
3410 /*
3411 * If this is an async queue and we have sync IO in flight, let it wait
3412 */
53c583d2 3413 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
0b182d61 3414 return false;
2f5cb738 3415
abc3c744 3416 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2f5cb738
JA
3417 if (cfq_class_idle(cfqq))
3418 max_dispatch = 1;
b4878f24 3419
2f5cb738
JA
3420 /*
3421 * Does this cfqq already have too much IO in flight?
3422 */
3423 if (cfqq->dispatched >= max_dispatch) {
ef8a41df 3424 bool promote_sync = false;
2f5cb738
JA
3425 /*
3426 * idle queue must always only have a single IO in flight
3427 */
3ed9a296 3428 if (cfq_class_idle(cfqq))
0b182d61 3429 return false;
3ed9a296 3430
ef8a41df 3431 /*
c4ade94f
LS
 3432 * If there is only one sync queue
 3433 * we can ignore the async queue here and give the sync
ef8a41df
SL
 3434 * queue no dispatch limit. The reason is that a sync queue can
 3435 * preempt an async queue, so limiting the sync queue doesn't make
 3436 * sense. This is useful for the aiostress test.
3437 */
c4ade94f
LS
3438 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
3439 promote_sync = true;
ef8a41df 3440
2f5cb738
JA
3441 /*
3442 * We have other queues, don't allow more IO from this one
3443 */
ef8a41df
SL
3444 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
3445 !promote_sync)
0b182d61 3446 return false;
9ede209e 3447
365722bb 3448 /*
474b18cc 3449 * Sole queue user, no limit
365722bb 3450 */
ef8a41df 3451 if (cfqd->busy_queues == 1 || promote_sync)
abc3c744
SL
3452 max_dispatch = -1;
3453 else
3454 /*
3455 * Normally we start throttling cfqq when cfq_quantum/2
3456 * requests have been dispatched. But we can drive
 3457 * deeper queue depths at the beginning of the slice,
 3458 * subject to the upper limit of cfq_quantum.
 3459 */
3460 max_dispatch = cfqd->cfq_quantum;
8e296755
JA
3461 }
3462
3463 /*
3464 * Async queues must wait a bit before being allowed dispatch.
3465 * We also ramp up the dispatch depth gradually for async IO,
3466 * based on the last sync IO we serviced
3467 */
963b72fc 3468 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
9a7f38c4 3469 u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync;
8e296755 3470 unsigned int depth;
365722bb 3471
9a7f38c4 3472 depth = div64_u64(last_sync, cfqd->cfq_slice[1]);
e00c54c3
JA
3473 if (!depth && !cfqq->dispatched)
3474 depth = 1;
8e296755
JA
3475 if (depth < max_dispatch)
3476 max_dispatch = depth;
2f5cb738 3477 }
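 /*
  * Worked example for the ramp-up above: with the default 100ms sync
  * slice, the allowed async depth grows by one for every 100ms that have
  * elapsed since the last delayed sync completion (with a floor of 1 for
  * an otherwise idle async queue), and is then capped by the max_dispatch
  * computed earlier.
  */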
3ed9a296 3478
0b182d61
JA
3479 /*
3480 * If we're below the current max, allow a dispatch
3481 */
3482 return cfqq->dispatched < max_dispatch;
3483}
3484
3485/*
3486 * Dispatch a request from cfqq, moving them to the request queue
3487 * dispatch list.
3488 */
3489static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3490{
3491 struct request *rq;
3492
3493 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
3494
3932a86b
GC
3495 rq = cfq_check_fifo(cfqq);
3496 if (rq)
3497 cfq_mark_cfqq_must_dispatch(cfqq);
3498
0b182d61
JA
3499 if (!cfq_may_dispatch(cfqd, cfqq))
3500 return false;
3501
3502 /*
 3503 * follow the expired path, else take the next available request
3504 */
0b182d61
JA
3505 if (!rq)
3506 rq = cfqq->next_rq;
3932a86b
GC
3507 else
3508 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
0b182d61
JA
3509
3510 /*
3511 * insert request into driver dispatch list
3512 */
3513 cfq_dispatch_insert(cfqd->queue, rq);
3514
3515 if (!cfqd->active_cic) {
c5869807 3516 struct cfq_io_cq *cic = RQ_CIC(rq);
0b182d61 3517
c5869807 3518 atomic_long_inc(&cic->icq.ioc->refcount);
0b182d61
JA
3519 cfqd->active_cic = cic;
3520 }
3521
3522 return true;
3523}
3524
3525/*
3526 * Find the cfqq that we need to service and move a request from that to the
3527 * dispatch list
3528 */
3529static int cfq_dispatch_requests(struct request_queue *q, int force)
3530{
3531 struct cfq_data *cfqd = q->elevator->elevator_data;
3532 struct cfq_queue *cfqq;
3533
3534 if (!cfqd->busy_queues)
3535 return 0;
3536
3537 if (unlikely(force))
3538 return cfq_forced_dispatch(cfqd);
3539
3540 cfqq = cfq_select_queue(cfqd);
3541 if (!cfqq)
8e296755
JA
3542 return 0;
3543
2f5cb738 3544 /*
0b182d61 3545 * Dispatch a request from this cfqq, if it is allowed
2f5cb738 3546 */
0b182d61
JA
3547 if (!cfq_dispatch_request(cfqd, cfqq))
3548 return 0;
3549
2f5cb738 3550 cfqq->slice_dispatch++;
b029195d 3551 cfq_clear_cfqq_must_dispatch(cfqq);
22e2c507 3552
2f5cb738
JA
3553 /*
 3554 * expire an async queue immediately if it has used up its slice. An idle
 3555 * queue always expires after 1 dispatch round.
3556 */
3557 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
3558 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
3559 cfq_class_idle(cfqq))) {
9a7f38c4 3560 cfqq->slice_end = ktime_get_ns() + 1;
e5ff082e 3561 cfq_slice_expired(cfqd, 0);
1da177e4
LT
3562 }
3563
b217a903 3564 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2f5cb738 3565 return 1;
1da177e4
LT
3566}
3567
1da177e4 3568/*
5e705374
JA
3569 * task holds one reference to the queue, dropped when task exits. each rq
3570 * in-flight on this queue also holds a reference, dropped when rq is freed.
1da177e4 3571 *
b1c35769 3572 * Each cfq queue took a reference on the parent group. Drop it now.
1da177e4
LT
3573 * queue lock must be held here.
3574 */
3575static void cfq_put_queue(struct cfq_queue *cfqq)
3576{
22e2c507 3577 struct cfq_data *cfqd = cfqq->cfqd;
0bbfeb83 3578 struct cfq_group *cfqg;
22e2c507 3579
30d7b944 3580 BUG_ON(cfqq->ref <= 0);
1da177e4 3581
30d7b944
SL
3582 cfqq->ref--;
3583 if (cfqq->ref)
1da177e4
LT
3584 return;
3585
7b679138 3586 cfq_log_cfqq(cfqd, cfqq, "put_queue");
1da177e4 3587 BUG_ON(rb_first(&cfqq->sort_list));
22e2c507 3588 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
b1c35769 3589 cfqg = cfqq->cfqg;
1da177e4 3590
28f95cbc 3591 if (unlikely(cfqd->active_queue == cfqq)) {
e5ff082e 3592 __cfq_slice_expired(cfqd, cfqq, 0);
23e018a1 3593 cfq_schedule_dispatch(cfqd);
28f95cbc 3594 }
22e2c507 3595
f04a6424 3596 BUG_ON(cfq_cfqq_on_rr(cfqq));
1da177e4 3597 kmem_cache_free(cfq_pool, cfqq);
eb7d8c07 3598 cfqg_put(cfqg);
1da177e4
LT
3599}
3600
d02a2c07 3601static void cfq_put_cooperator(struct cfq_queue *cfqq)
1da177e4 3602{
df5fe3e8
JM
3603 struct cfq_queue *__cfqq, *next;
3604
df5fe3e8
JM
3605 /*
3606 * If this queue was scheduled to merge with another queue, be
3607 * sure to drop the reference taken on that queue (and others in
3608 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
3609 */
3610 __cfqq = cfqq->new_cfqq;
3611 while (__cfqq) {
3612 if (__cfqq == cfqq) {
3613 WARN(1, "cfqq->new_cfqq loop detected\n");
3614 break;
3615 }
3616 next = __cfqq->new_cfqq;
3617 cfq_put_queue(__cfqq);
3618 __cfqq = next;
3619 }
d02a2c07
SL
3620}
3621
3622static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3623{
3624 if (unlikely(cfqq == cfqd->active_queue)) {
3625 __cfq_slice_expired(cfqd, cfqq, 0);
3626 cfq_schedule_dispatch(cfqd);
3627 }
3628
3629 cfq_put_cooperator(cfqq);
df5fe3e8 3630
89850f7e
JA
3631 cfq_put_queue(cfqq);
3632}
22e2c507 3633
9b84cacd
TH
3634static void cfq_init_icq(struct io_cq *icq)
3635{
3636 struct cfq_io_cq *cic = icq_to_cic(icq);
3637
9a7f38c4 3638 cic->ttime.last_end_request = ktime_get_ns();
9b84cacd
TH
3639}
3640
c5869807 3641static void cfq_exit_icq(struct io_cq *icq)
89850f7e 3642{
c5869807 3643 struct cfq_io_cq *cic = icq_to_cic(icq);
283287a5 3644 struct cfq_data *cfqd = cic_to_cfqd(cic);
4faa3c81 3645
563180a4
TH
3646 if (cic_to_cfqq(cic, false)) {
3647 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
3648 cic_set_cfqq(cic, NULL, false);
12a05732
AV
3649 }
3650
563180a4
TH
3651 if (cic_to_cfqq(cic, true)) {
3652 cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
3653 cic_set_cfqq(cic, NULL, true);
12a05732 3654 }
89850f7e
JA
3655}
3656
abede6da 3657static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
22e2c507
JA
3658{
3659 struct task_struct *tsk = current;
3660 int ioprio_class;
3661
3b18152c 3662 if (!cfq_cfqq_prio_changed(cfqq))
22e2c507
JA
3663 return;
3664
598971bf 3665 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
22e2c507 3666 switch (ioprio_class) {
fe094d98
JA
3667 default:
3668 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
3669 case IOPRIO_CLASS_NONE:
3670 /*
6d63c275 3671 * no prio set, inherit CPU scheduling settings
fe094d98
JA
3672 */
3673 cfqq->ioprio = task_nice_ioprio(tsk);
6d63c275 3674 cfqq->ioprio_class = task_nice_ioclass(tsk);
fe094d98
JA
3675 break;
3676 case IOPRIO_CLASS_RT:
598971bf 3677 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
3678 cfqq->ioprio_class = IOPRIO_CLASS_RT;
3679 break;
3680 case IOPRIO_CLASS_BE:
598971bf 3681 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
fe094d98
JA
3682 cfqq->ioprio_class = IOPRIO_CLASS_BE;
3683 break;
3684 case IOPRIO_CLASS_IDLE:
3685 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
3686 cfqq->ioprio = 7;
3687 cfq_clear_cfqq_idle_window(cfqq);
3688 break;
22e2c507
JA
3689 }
3690
3691 /*
3692 * keep track of original prio settings in case we have to temporarily
3693 * elevate the priority of this queue
3694 */
3695 cfqq->org_ioprio = cfqq->ioprio;
b8269db4 3696 cfqq->org_ioprio_class = cfqq->ioprio_class;
3b18152c 3697 cfq_clear_cfqq_prio_changed(cfqq);
22e2c507
JA
3698}
3699
598971bf 3700static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
22e2c507 3701{
598971bf 3702 int ioprio = cic->icq.ioc->ioprio;
bca4b914 3703 struct cfq_data *cfqd = cic_to_cfqd(cic);
478a82b0 3704 struct cfq_queue *cfqq;
35e6077c 3705
598971bf
TH
3706 /*
3707 * Check whether ioprio has changed. The condition may trigger
3708 * spuriously on a newly created cic but there's no harm.
3709 */
3710 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
caaa5f9f
JA
3711 return;
3712
563180a4 3713 cfqq = cic_to_cfqq(cic, false);
caaa5f9f 3714 if (cfqq) {
563180a4 3715 cfq_put_queue(cfqq);
2da8de0b 3716 cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
563180a4 3717 cic_set_cfqq(cic, cfqq, false);
22e2c507 3718 }
caaa5f9f 3719
563180a4 3720 cfqq = cic_to_cfqq(cic, true);
caaa5f9f
JA
3721 if (cfqq)
3722 cfq_mark_cfqq_prio_changed(cfqq);
598971bf
TH
3723
3724 cic->ioprio = ioprio;
22e2c507
JA
3725}
3726
d5036d77 3727static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
a6151c3a 3728 pid_t pid, bool is_sync)
d5036d77
JA
3729{
3730 RB_CLEAR_NODE(&cfqq->rb_node);
3731 RB_CLEAR_NODE(&cfqq->p_node);
3732 INIT_LIST_HEAD(&cfqq->fifo);
3733
30d7b944 3734 cfqq->ref = 0;
d5036d77
JA
3735 cfqq->cfqd = cfqd;
3736
3737 cfq_mark_cfqq_prio_changed(cfqq);
3738
3739 if (is_sync) {
3740 if (!cfq_class_idle(cfqq))
3741 cfq_mark_cfqq_idle_window(cfqq);
3742 cfq_mark_cfqq_sync(cfqq);
3743 }
3744 cfqq->pid = pid;
3745}
3746
24610333 3747#ifdef CONFIG_CFQ_GROUP_IOSCHED
142bbdfc 3748static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
24610333 3749{
bca4b914 3750 struct cfq_data *cfqd = cic_to_cfqd(cic);
60a83707 3751 struct cfq_queue *cfqq;
f4da8072 3752 uint64_t serial_nr;
24610333 3753
598971bf 3754 rcu_read_lock();
f4da8072 3755 serial_nr = bio_blkcg(bio)->css.serial_nr;
598971bf 3756 rcu_read_unlock();
24610333 3757
598971bf
TH
3758 /*
3759 * Check whether blkcg has changed. The condition may trigger
3760 * spuriously on a newly created cic but there's no harm.
3761 */
f4da8072 3762 if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
142bbdfc 3763 return;
87760e5e 3764
60a83707
TH
3765 /*
3766 * Drop reference to queues. New queues will be assigned in new
3767 * group upon arrival of fresh requests.
3768 */
3769 cfqq = cic_to_cfqq(cic, false);
3770 if (cfqq) {
3771 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3772 cic_set_cfqq(cic, NULL, false);
3773 cfq_put_queue(cfqq);
3774 }
3775
3776 cfqq = cic_to_cfqq(cic, true);
3777 if (cfqq) {
3778 cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
3779 cic_set_cfqq(cic, NULL, true);
3780 cfq_put_queue(cfqq);
24610333 3781 }
598971bf 3782
f4da8072 3783 cic->blkcg_serial_nr = serial_nr;
24610333 3784}
598971bf 3785#else
142bbdfc 3786static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
5d7f5ce1 3787{
5d7f5ce1 3788}
24610333
VG
3789#endif /* CONFIG_CFQ_GROUP_IOSCHED */
3790
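/*
 * Async queues are shared rather than per-task: each group keeps one
 * async cfqq per (class, priority) slot (IOPRIO_BE_NR slots each for the
 * RT and BE classes, plus a single idle queue).  This helper returns the
 * slot, so every task at that priority in the group ends up sharing the
 * same async queue.
 */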
c2dea2d1 3791static struct cfq_queue **
60a83707 3792cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
c2dea2d1 3793{
fe094d98 3794 switch (ioprio_class) {
c2dea2d1 3795 case IOPRIO_CLASS_RT:
60a83707 3796 return &cfqg->async_cfqq[0][ioprio];
598971bf
TH
3797 case IOPRIO_CLASS_NONE:
3798 ioprio = IOPRIO_NORM;
3799 /* fall through */
c2dea2d1 3800 case IOPRIO_CLASS_BE:
60a83707 3801 return &cfqg->async_cfqq[1][ioprio];
c2dea2d1 3802 case IOPRIO_CLASS_IDLE:
60a83707 3803 return &cfqg->async_idle_cfqq;
c2dea2d1
VT
3804 default:
3805 BUG();
3806 }
3807}
3808
15c31be4 3809static struct cfq_queue *
abede6da 3810cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
2da8de0b 3811 struct bio *bio)
15c31be4 3812{
c6ce1943
JM
3813 int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
3814 int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
d4aad7ff 3815 struct cfq_queue **async_cfqq = NULL;
4ebc1c61 3816 struct cfq_queue *cfqq;
322731ed
TH
3817 struct cfq_group *cfqg;
3818
3819 rcu_read_lock();
ae118896 3820 cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
322731ed
TH
3821 if (!cfqg) {
3822 cfqq = &cfqd->oom_cfqq;
3823 goto out;
3824 }
15c31be4 3825
c2dea2d1 3826 if (!is_sync) {
c6ce1943
JM
3827 if (!ioprio_valid(cic->ioprio)) {
3828 struct task_struct *tsk = current;
3829 ioprio = task_nice_ioprio(tsk);
3830 ioprio_class = task_nice_ioclass(tsk);
3831 }
60a83707 3832 async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
c2dea2d1 3833 cfqq = *async_cfqq;
4ebc1c61
TH
3834 if (cfqq)
3835 goto out;
c2dea2d1
VT
3836 }
3837
e00f4f4d
TH
3838 cfqq = kmem_cache_alloc_node(cfq_pool,
3839 GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
d4aad7ff
TH
3840 cfqd->queue->node);
3841 if (!cfqq) {
3842 cfqq = &cfqd->oom_cfqq;
3843 goto out;
3844 }
3845
4d608baa
AP
3846 /* cfq_init_cfqq() assumes cfqq->ioprio_class is initialized. */
3847 cfqq->ioprio_class = IOPRIO_CLASS_NONE;
d4aad7ff
TH
3848 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
3849 cfq_init_prio_data(cfqq, cic);
3850 cfq_link_cfqq_cfqg(cfqq, cfqg);
3851 cfq_log_cfqq(cfqd, cfqq, "alloced");
15c31be4 3852
d4aad7ff
TH
3853 if (async_cfqq) {
3854 /* a new async queue is created, pin and remember */
30d7b944 3855 cfqq->ref++;
c2dea2d1 3856 *async_cfqq = cfqq;
15c31be4 3857 }
4ebc1c61 3858out:
30d7b944 3859 cfqq->ref++;
322731ed 3860 rcu_read_unlock();
15c31be4
JA
3861 return cfqq;
3862}
3863
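/*
 * Think time is tracked as a decaying average: each new sample (time from
 * the last completion to this request, clamped to 2 * slice_idle) is
 * folded in with a weight of roughly 1/8, so the last handful of samples
 * dominate.  ttime_mean = ttime_total / ttime_samples is what the idling
 * heuristics later compare against slice_idle.
 */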
22e2c507 3864static void
9a7f38c4 3865__cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle)
1da177e4 3866{
9a7f38c4 3867 u64 elapsed = ktime_get_ns() - ttime->last_end_request;
383cd721 3868 elapsed = min(elapsed, 2UL * slice_idle);
db3b5848 3869
383cd721 3870 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
9a7f38c4
JM
3871 ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
3872 ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
3873 ttime->ttime_samples);
383cd721
SL
3874}
3875
3876static void
3877cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 3878 struct cfq_io_cq *cic)
383cd721 3879{
f5f2b6ce 3880 if (cfq_cfqq_sync(cfqq)) {
383cd721 3881 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
f5f2b6ce
SL
3882 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3883 cfqd->cfq_slice_idle);
3884 }
7700fc4f
SL
3885#ifdef CONFIG_CFQ_GROUP_IOSCHED
3886 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3887#endif
22e2c507 3888}
1da177e4 3889
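/*
 * seek_history is a 32-bit sliding window, one bit per queued request.
 * On rotational storage a bit is set when the request starts more than
 * CFQQ_SEEK_THR sectors away from the previous one; on non-rotational
 * devices small requests (below CFQQ_SECT_THR_NONROT sectors) are counted
 * instead, since seek distance is meaningless there.  CFQQ_SEEKY() flags
 * the queue as seeky once more than 1/8 of the window bits are set.
 */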
206dc69b 3890static void
b2c18e1e 3891cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
6d048f53 3892 struct request *rq)
206dc69b 3893{
3dde36dd 3894 sector_t sdist = 0;
41647e7a 3895 sector_t n_sec = blk_rq_sectors(rq);
3dde36dd
CZ
3896 if (cfqq->last_request_pos) {
3897 if (cfqq->last_request_pos < blk_rq_pos(rq))
3898 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3899 else
3900 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3901 }
206dc69b 3902
3dde36dd 3903 cfqq->seek_history <<= 1;
41647e7a
CZ
3904 if (blk_queue_nonrot(cfqd->queue))
3905 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3906 else
3907 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
206dc69b 3908}
1da177e4 3909
a2b80967
CH
3910static inline bool req_noidle(struct request *req)
3911{
3912 return req_op(req) == REQ_OP_WRITE &&
3913 (req->cmd_flags & (REQ_SYNC | REQ_IDLE)) == REQ_SYNC;
3914}
3915
22e2c507
JA
3916/*
3917 * Disable idle window if the process thinks too long or seeks so much that
3918 * it doesn't matter
3919 */
3920static void
3921cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
c5869807 3922 struct cfq_io_cq *cic)
22e2c507 3923{
7b679138 3924 int old_idle, enable_idle;
1be92f2f 3925
0871714e
JA
3926 /*
3927 * Don't idle for async or idle io prio class
3928 */
3929 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
1be92f2f
JA
3930 return;
3931
c265a7f4 3932 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
1da177e4 3933
76280aff
CZ
3934 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3935 cfq_mark_cfqq_deep(cfqq);
3936
a2b80967 3937 if (cfqq->next_rq && req_noidle(cfqq->next_rq))
749ef9f8 3938 enable_idle = 0;
f6e8d01b 3939 else if (!atomic_read(&cic->icq.ioc->active_ref) ||
c5869807
TH
3940 !cfqd->cfq_slice_idle ||
3941 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
22e2c507 3942 enable_idle = 0;
383cd721
SL
3943 else if (sample_valid(cic->ttime.ttime_samples)) {
3944 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
22e2c507
JA
3945 enable_idle = 0;
3946 else
3947 enable_idle = 1;
1da177e4
LT
3948 }
3949
7b679138
JA
3950 if (old_idle != enable_idle) {
3951 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3952 if (enable_idle)
3953 cfq_mark_cfqq_idle_window(cfqq);
3954 else
3955 cfq_clear_cfqq_idle_window(cfqq);
3956 }
22e2c507 3957}
1da177e4 3958
22e2c507
JA
3959/*
 3960 * Check if new_cfqq should preempt the currently active queue. Return 0 if
 3961 * not or if we aren't sure; a 1 will cause a preempt.
3962 */
a6151c3a 3963static bool
22e2c507 3964cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
5e705374 3965 struct request *rq)
22e2c507 3966{
6d048f53 3967 struct cfq_queue *cfqq;
22e2c507 3968
6d048f53
JA
3969 cfqq = cfqd->active_queue;
3970 if (!cfqq)
a6151c3a 3971 return false;
22e2c507 3972
6d048f53 3973 if (cfq_class_idle(new_cfqq))
a6151c3a 3974 return false;
22e2c507
JA
3975
3976 if (cfq_class_idle(cfqq))
a6151c3a 3977 return true;
1e3335de 3978
875feb63
DS
3979 /*
3980 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3981 */
3982 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3983 return false;
3984
374f84ac
JA
3985 /*
3986 * if the new request is sync, but the currently running queue is
3987 * not, let the sync request have priority.
3988 */
3932a86b 3989 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
a6151c3a 3990 return true;
1e3335de 3991
3984aa55
JK
3992 /*
3993 * Treat ancestors of current cgroup the same way as current cgroup.
3994 * For anybody else we disallow preemption to guarantee service
3995 * fairness among cgroups.
3996 */
3997 if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
8682e1f1
VG
3998 return false;
3999
4000 if (cfq_slice_used(cfqq))
4001 return true;
4002
6c80731c
JK
4003 /*
4004 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
4005 */
4006 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
4007 return true;
4008
4009 WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
8682e1f1 4010 /* Allow preemption only if we are idling on sync-noidle tree */
4d2ceea4 4011 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
8682e1f1 4012 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
8682e1f1
VG
4013 RB_EMPTY_ROOT(&cfqq->sort_list))
4014 return true;
4015
b53d1ed7
JA
4016 /*
4017 * So both queues are sync. Let the new request get disk time if
4018 * it's a metadata request and the current queue is doing regular IO.
4019 */
65299a3b 4020 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
b53d1ed7
JA
4021 return true;
4022
d2d59e18
SL
4023 /* An idle queue should not be idle now for some reason */
4024 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
4025 return true;
4026
1e3335de 4027 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
a6151c3a 4028 return false;
1e3335de
JA
4029
4030 /*
4031 * if this request is as-good as one we would expect from the
4032 * current cfqq, let it preempt
4033 */
e9ce335d 4034 if (cfq_rq_close(cfqd, cfqq, rq))
a6151c3a 4035 return true;
1e3335de 4036
a6151c3a 4037 return false;
22e2c507
JA
4038}
4039
4040/*
4041 * cfqq preempts the active queue. if we allowed preempt with no slice left,
4042 * let it have half of its nominal slice.
4043 */
4044static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4045{
df0793ab
SL
4046 enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
4047
7b679138 4048 cfq_log_cfqq(cfqd, cfqq, "preempt");
df0793ab 4049 cfq_slice_expired(cfqd, 1);
22e2c507 4050
f8ae6e3e
SL
4051 /*
4052 * workload type is changed, don't save slice, otherwise preempt
4053 * doesn't happen
4054 */
df0793ab 4055 if (old_type != cfqq_type(cfqq))
4d2ceea4 4056 cfqq->cfqg->saved_wl_slice = 0;
f8ae6e3e 4057
bf572256
JA
4058 /*
 4059 * Put the new queue at the front of the current list,
4060 * so we know that it will be selected next.
4061 */
4062 BUG_ON(!cfq_cfqq_on_rr(cfqq));
edd75ffd
JA
4063
4064 cfq_service_tree_add(cfqd, cfqq, 1);
eda5e0c9 4065
62a37f6b
JT
4066 cfqq->slice_end = 0;
4067 cfq_mark_cfqq_slice_new(cfqq);
22e2c507
JA
4068}
4069
22e2c507 4070/*
5e705374 4071 * Called when a new fs request (rq) is added (to cfqq). Check if there's
22e2c507
JA
4072 * something we should do about it
4073 */
4074static void
5e705374
JA
4075cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
4076 struct request *rq)
22e2c507 4077{
c5869807 4078 struct cfq_io_cq *cic = RQ_CIC(rq);
12e9fddd 4079
45333d5a 4080 cfqd->rq_queued++;
65299a3b
CH
4081 if (rq->cmd_flags & REQ_PRIO)
4082 cfqq->prio_pending++;
374f84ac 4083
383cd721 4084 cfq_update_io_thinktime(cfqd, cfqq, cic);
b2c18e1e 4085 cfq_update_io_seektime(cfqd, cfqq, rq);
9c2c38a1
JA
4086 cfq_update_idle_window(cfqd, cfqq, cic);
4087
b2c18e1e 4088 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
22e2c507
JA
4089
4090 if (cfqq == cfqd->active_queue) {
4091 /*
b029195d
JA
4092 * Remember that we saw a request from this process, but
4093 * don't start queuing just yet. Otherwise we risk seeing lots
4094 * of tiny requests, because we disrupt the normal plugging
d6ceb25e
JA
4095 * and merging. If the request is already larger than a single
4096 * page, let it rip immediately. For that case we assume that
2d870722
JA
4097 * merging is already done. Ditto for a busy system that
4098 * has other work pending, don't risk delaying until the
4099 * idle timer unplug to continue working.
22e2c507 4100 */
d6ceb25e 4101 if (cfq_cfqq_wait_request(cfqq)) {
09cbfeaf 4102 if (blk_rq_bytes(rq) > PAGE_SIZE ||
2d870722 4103 cfqd->busy_queues > 1) {
812df48d 4104 cfq_del_timer(cfqd, cfqq);
554554f6 4105 cfq_clear_cfqq_wait_request(cfqq);
24ecfbe2 4106 __blk_run_queue(cfqd->queue);
a11cdaa7 4107 } else {
155fead9 4108 cfqg_stats_update_idle_time(cfqq->cfqg);
bf791937 4109 cfq_mark_cfqq_must_dispatch(cfqq);
a11cdaa7 4110 }
d6ceb25e 4111 }
5e705374 4112 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
22e2c507
JA
4113 /*
4114 * not the active queue - expire current slice if it is
 4115 * idle and has expired its mean thinktime or this new queue
3a9a3f6c
DS
4116 * has some old slice time left and is of higher priority or
4117 * this new queue is RT and the current one is BE
22e2c507
JA
4118 */
4119 cfq_preempt_queue(cfqd, cfqq);
24ecfbe2 4120 __blk_run_queue(cfqd->queue);
22e2c507 4121 }
1da177e4
LT
4122}
4123
165125e1 4124static void cfq_insert_request(struct request_queue *q, struct request *rq)
1da177e4 4125{
b4878f24 4126 struct cfq_data *cfqd = q->elevator->elevator_data;
5e705374 4127 struct cfq_queue *cfqq = RQ_CFQQ(rq);
22e2c507 4128
7b679138 4129 cfq_log_cfqq(cfqd, cfqq, "insert_request");
abede6da 4130 cfq_init_prio_data(cfqq, RQ_CIC(rq));
1da177e4 4131
9a7f38c4 4132 rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
22e2c507 4133 list_add_tail(&rq->queuelist, &cfqq->fifo);
aa6f6a3d 4134 cfq_add_rq_rb(rq);
ef295ecf 4135 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
155fead9 4136 rq->cmd_flags);
5e705374 4137 cfq_rq_enqueued(cfqd, cfqq, rq);
1da177e4
LT
4138}
4139
45333d5a
AC
4140/*
4141 * Update hw_tag based on peak queue depth over 50 samples under
4142 * sufficient load.
4143 */
4144static void cfq_update_hw_tag(struct cfq_data *cfqd)
4145{
1a1238a7
SL
4146 struct cfq_queue *cfqq = cfqd->active_queue;
4147
53c583d2
CZ
4148 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
4149 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
e459dd08
CZ
4150
4151 if (cfqd->hw_tag == 1)
4152 return;
45333d5a
AC
4153
4154 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
53c583d2 4155 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
45333d5a
AC
4156 return;
4157
1a1238a7
SL
4158 /*
 4159 * If the active queue doesn't have enough requests and can idle, cfq might
 4160 * not dispatch sufficient requests to hardware. Don't zero hw_tag in this
 4161 * case.
4162 */
4163 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
4164 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
53c583d2 4165 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
1a1238a7
SL
4166 return;
4167
45333d5a
AC
4168 if (cfqd->hw_tag_samples++ < 50)
4169 return;
4170
e459dd08 4171 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
45333d5a
AC
4172 cfqd->hw_tag = 1;
4173 else
4174 cfqd->hw_tag = 0;
45333d5a
AC
4175}
4176
7667aa06
VG
4177static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
4178{
c5869807 4179 struct cfq_io_cq *cic = cfqd->active_cic;
9a7f38c4 4180 u64 now = ktime_get_ns();
7667aa06 4181
02a8f01b
JT
4182 /* If the queue already has requests, don't wait */
4183 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
4184 return false;
4185
7667aa06
VG
4186 /* If there are other queues in the group, don't wait */
4187 if (cfqq->cfqg->nr_cfqq > 1)
4188 return false;
4189
7700fc4f
SL
4190 /* the only queue in the group, but think time is big */
4191 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
4192 return false;
4193
7667aa06
VG
4194 if (cfq_slice_used(cfqq))
4195 return true;
4196
4197 /* if slice left is less than think time, wait busy */
383cd721 4198 if (cic && sample_valid(cic->ttime.ttime_samples)
9a7f38c4 4199 && (cfqq->slice_end - now < cic->ttime.ttime_mean))
7667aa06
VG
4200 return true;
4201
4202 /*
 4203 * If the think time is less than a jiffy then ttime_mean=0 and the above
 4204 * will not be true. It might happen that the slice has not expired yet
 4205 * but will expire soon (4-5 ns) during select_queue(). To cover the
4206 * case where think time is less than a jiffy, mark the queue wait
4207 * busy if only 1 jiffy is left in the slice.
4208 */
9a7f38c4 4209 if (cfqq->slice_end - now <= jiffies_to_nsecs(1))
7667aa06
VG
4210 return true;
4211
4212 return false;
4213}
4214
165125e1 4215static void cfq_completed_request(struct request_queue *q, struct request *rq)
1da177e4 4216{
5e705374 4217 struct cfq_queue *cfqq = RQ_CFQQ(rq);
b4878f24 4218 struct cfq_data *cfqd = cfqq->cfqd;
5380a101 4219 const int sync = rq_is_sync(rq);
9a7f38c4 4220 u64 now = ktime_get_ns();
1da177e4 4221
a2b80967 4222 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq));
1da177e4 4223
45333d5a
AC
4224 cfq_update_hw_tag(cfqd);
4225
53c583d2 4226 WARN_ON(!cfqd->rq_in_driver);
6d048f53 4227 WARN_ON(!cfqq->dispatched);
53c583d2 4228 cfqd->rq_in_driver--;
6d048f53 4229 cfqq->dispatched--;
80bdf0c7 4230 (RQ_CFQG(rq))->dispatched--;
522a7775
OS
4231 cfqg_stats_update_completion(cfqq->cfqg, rq->start_time_ns,
4232 rq->io_start_time_ns, rq->cmd_flags);
1da177e4 4233
53c583d2 4234 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3ed9a296 4235
365722bb 4236 if (sync) {
34b98d03 4237 struct cfq_rb_root *st;
f5f2b6ce 4238
383cd721 4239 RQ_CIC(rq)->ttime.last_end_request = now;
f5f2b6ce
SL
4240
4241 if (cfq_cfqq_on_rr(cfqq))
34b98d03 4242 st = cfqq->service_tree;
f5f2b6ce 4243 else
34b98d03
VG
4244 st = st_for(cfqq->cfqg, cfqq_class(cfqq),
4245 cfqq_type(cfqq));
4246
4247 st->ttime.last_end_request = now;
522a7775 4248 if (rq->start_time_ns + cfqd->cfq_fifo_expire[1] <= now)
573412b2 4249 cfqd->last_delayed_sync = now;
365722bb 4250 }
caaa5f9f 4251
7700fc4f
SL
4252#ifdef CONFIG_CFQ_GROUP_IOSCHED
4253 cfqq->cfqg->ttime.last_end_request = now;
4254#endif
4255
caaa5f9f
JA
4256 /*
4257 * If this is the active queue, check if it needs to be expired,
4258 * or if we want to idle in case it has no pending requests.
4259 */
4260 if (cfqd->active_queue == cfqq) {
a36e71f9
JA
4261 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
4262
44f7c160
JA
4263 if (cfq_cfqq_slice_new(cfqq)) {
4264 cfq_set_prio_slice(cfqd, cfqq);
4265 cfq_clear_cfqq_slice_new(cfqq);
4266 }
f75edf2d
VG
4267
4268 /*
7667aa06
VG
4269 * Should we wait for next request to come in before we expire
4270 * the queue.
f75edf2d 4271 */
7667aa06 4272 if (cfq_should_wait_busy(cfqd, cfqq)) {
9a7f38c4 4273 u64 extend_sl = cfqd->cfq_slice_idle;
80bdf0c7
VG
4274 if (!cfqd->cfq_slice_idle)
4275 extend_sl = cfqd->cfq_group_idle;
9a7f38c4 4276 cfqq->slice_end = now + extend_sl;
f75edf2d 4277 cfq_mark_cfqq_wait_busy(cfqq);
b1ffe737 4278 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
f75edf2d
VG
4279 }
4280
a36e71f9 4281 /*
8e550632
CZ
4282 * Idling is not enabled on:
4283 * - expired queues
4284 * - idle-priority queues
4285 * - async queues
4286 * - queues with still some requests queued
4287 * - when there is a close cooperator
a36e71f9 4288 */
0871714e 4289 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
e5ff082e 4290 cfq_slice_expired(cfqd, 1);
8e550632
CZ
4291 else if (sync && cfqq_empty &&
4292 !cfq_close_cooperator(cfqd, cfqq)) {
749ef9f8 4293 cfq_arm_slice_timer(cfqd);
8e550632 4294 }
caaa5f9f 4295 }
6d048f53 4296
53c583d2 4297 if (!cfqd->rq_in_driver)
23e018a1 4298 cfq_schedule_dispatch(cfqd);
1da177e4
LT
4299}
4300
ef295ecf 4301static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
b8269db4
JA
4302{
4303 /*
4304 * If REQ_PRIO is set, boost class and prio level, if it's below
4305 * BE/NORM. If prio is not set, restore the potentially boosted
4306 * class/prio level.
4307 */
ef295ecf 4308 if (!(op & REQ_PRIO)) {
b8269db4
JA
4309 cfqq->ioprio_class = cfqq->org_ioprio_class;
4310 cfqq->ioprio = cfqq->org_ioprio;
4311 } else {
4312 if (cfq_class_idle(cfqq))
4313 cfqq->ioprio_class = IOPRIO_CLASS_BE;
4314 if (cfqq->ioprio > IOPRIO_NORM)
4315 cfqq->ioprio = IOPRIO_NORM;
4316 }
4317}
4318
89850f7e 4319static inline int __cfq_may_queue(struct cfq_queue *cfqq)
22e2c507 4320{
1b379d8d 4321 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3b18152c 4322 cfq_mark_cfqq_must_alloc_slice(cfqq);
22e2c507 4323 return ELV_MQUEUE_MUST;
3b18152c 4324 }
1da177e4 4325
22e2c507 4326 return ELV_MQUEUE_MAY;
22e2c507
JA
4327}
4328
ef295ecf 4329static int cfq_may_queue(struct request_queue *q, unsigned int op)
22e2c507
JA
4330{
4331 struct cfq_data *cfqd = q->elevator->elevator_data;
4332 struct task_struct *tsk = current;
c5869807 4333 struct cfq_io_cq *cic;
22e2c507
JA
4334 struct cfq_queue *cfqq;
4335
4336 /*
4337 * don't force setup of a queue from here, as a call to may_queue
4338 * does not necessarily imply that a request actually will be queued.
4339 * so just lookup a possibly existing queue, or return 'may queue'
4340 * if that fails
4341 */
4ac845a2 4342 cic = cfq_cic_lookup(cfqd, tsk->io_context);
91fac317
VT
4343 if (!cic)
4344 return ELV_MQUEUE_MAY;
4345
ef295ecf 4346 cfqq = cic_to_cfqq(cic, op_is_sync(op));
22e2c507 4347 if (cfqq) {
abede6da 4348 cfq_init_prio_data(cfqq, cic);
ef295ecf 4349 cfqq_boost_on_prio(cfqq, op);
22e2c507 4350
89850f7e 4351 return __cfq_may_queue(cfqq);
22e2c507
JA
4352 }
4353
4354 return ELV_MQUEUE_MAY;
1da177e4
LT
4355}
4356
1da177e4
LT
4357/*
4358 * queue lock held here
4359 */
bb37b94c 4360static void cfq_put_request(struct request *rq)
1da177e4 4361{
5e705374 4362 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1da177e4 4363
5e705374 4364 if (cfqq) {
22e2c507 4365 const int rw = rq_data_dir(rq);
1da177e4 4366
22e2c507
JA
4367 BUG_ON(!cfqq->allocated[rw]);
4368 cfqq->allocated[rw]--;
1da177e4 4369
7f1dc8a2 4370 /* Put down rq reference on cfqg */
eb7d8c07 4371 cfqg_put(RQ_CFQG(rq));
a612fddf
TH
4372 rq->elv.priv[0] = NULL;
4373 rq->elv.priv[1] = NULL;
7f1dc8a2 4374
1da177e4
LT
4375 cfq_put_queue(cfqq);
4376 }
4377}
4378
df5fe3e8 4379static struct cfq_queue *
c5869807 4380cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
df5fe3e8
JM
4381 struct cfq_queue *cfqq)
4382{
4383 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
4384 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
b3b6d040 4385 cfq_mark_cfqq_coop(cfqq->new_cfqq);
df5fe3e8
JM
4386 cfq_put_queue(cfqq);
4387 return cic_to_cfqq(cic, 1);
4388}
4389
e6c5bc73
JM
4390/*
4391 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
4392 * was the last process referring to said cfqq.
4393 */
4394static struct cfq_queue *
c5869807 4395split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
e6c5bc73
JM
4396{
4397 if (cfqq_process_refs(cfqq) == 1) {
e6c5bc73
JM
4398 cfqq->pid = current->pid;
4399 cfq_clear_cfqq_coop(cfqq);
ae54abed 4400 cfq_clear_cfqq_split_coop(cfqq);
e6c5bc73
JM
4401 return cfqq;
4402 }
4403
4404 cic_set_cfqq(cic, NULL, 1);
d02a2c07
SL
4405
4406 cfq_put_cooperator(cfqq);
4407
e6c5bc73
JM
4408 cfq_put_queue(cfqq);
4409 return NULL;
4410}
1da177e4 4411/*
22e2c507 4412 * Allocate cfq data structures associated with this request.
1da177e4 4413 */
22e2c507 4414static int
852c788f
TH
4415cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
4416 gfp_t gfp_mask)
1da177e4
LT
4417{
4418 struct cfq_data *cfqd = q->elevator->elevator_data;
f1f8cc94 4419 struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
1da177e4 4420 const int rw = rq_data_dir(rq);
a6151c3a 4421 const bool is_sync = rq_is_sync(rq);
22e2c507 4422 struct cfq_queue *cfqq;
1da177e4 4423
216284c3 4424 spin_lock_irq(q->queue_lock);
f1f8cc94 4425
598971bf 4426 check_ioprio_changed(cic, bio);
142bbdfc 4427 check_blkcg_changed(cic, bio);
e6c5bc73 4428new_queue:
91fac317 4429 cfqq = cic_to_cfqq(cic, is_sync);
32f2e807 4430 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
bce6133b
TH
4431 if (cfqq)
4432 cfq_put_queue(cfqq);
2da8de0b 4433 cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
91fac317 4434 cic_set_cfqq(cic, cfqq, is_sync);
df5fe3e8 4435 } else {
e6c5bc73
JM
4436 /*
4437 * If the queue was seeky for too long, break it apart.
4438 */
ae54abed 4439 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
e6c5bc73
JM
4440 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
4441 cfqq = split_cfqq(cic, cfqq);
4442 if (!cfqq)
4443 goto new_queue;
4444 }
4445
df5fe3e8
JM
4446 /*
4447 * Check to see if this queue is scheduled to merge with
4448 * another, closely cooperating queue. The merging of
4449 * queues happens here as it must be done in process context.
4450 * The reference on new_cfqq was taken in merge_cfqqs.
4451 */
4452 if (cfqq->new_cfqq)
4453 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
91fac317 4454 }
1da177e4
LT
4455
4456 cfqq->allocated[rw]++;
1da177e4 4457
6fae9c25 4458 cfqq->ref++;
eb7d8c07 4459 cfqg_get(cfqq->cfqg);
a612fddf 4460 rq->elv.priv[0] = cfqq;
1adaf3dd 4461 rq->elv.priv[1] = cfqq->cfqg;
216284c3 4462 spin_unlock_irq(q->queue_lock);
5d7f5ce1 4463
5e705374 4464 return 0;
1da177e4
LT
4465}
4466
65f27f38 4467static void cfq_kick_queue(struct work_struct *work)
22e2c507 4468{
65f27f38 4469 struct cfq_data *cfqd =
23e018a1 4470 container_of(work, struct cfq_data, unplug_work);
165125e1 4471 struct request_queue *q = cfqd->queue;
22e2c507 4472
40bb54d1 4473 spin_lock_irq(q->queue_lock);
24ecfbe2 4474 __blk_run_queue(cfqd->queue);
40bb54d1 4475 spin_unlock_irq(q->queue_lock);
22e2c507
JA
4476}
4477
4478/*
4479 * Timer running if the active_queue is currently idling inside its time slice
4480 */
91148325 4481static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer)
22e2c507 4482{
91148325
JK
4483 struct cfq_data *cfqd = container_of(timer, struct cfq_data,
4484 idle_slice_timer);
22e2c507
JA
4485 struct cfq_queue *cfqq;
4486 unsigned long flags;
3c6bd2f8 4487 int timed_out = 1;
22e2c507 4488
7b679138
JA
4489 cfq_log(cfqd, "idle timer fired");
4490
22e2c507
JA
4491 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
4492
fe094d98
JA
4493 cfqq = cfqd->active_queue;
4494 if (cfqq) {
3c6bd2f8
JA
4495 timed_out = 0;
4496
b029195d
JA
4497 /*
4498 * We saw a request before the queue expired, let it through
4499 */
4500 if (cfq_cfqq_must_dispatch(cfqq))
4501 goto out_kick;
4502
22e2c507
JA
4503 /*
4504 * expired
4505 */
44f7c160 4506 if (cfq_slice_used(cfqq))
22e2c507
JA
4507 goto expire;
4508
4509 /*
4510 * only expire and reinvoke request handler, if there are
4511 * other queues with pending requests
4512 */
caaa5f9f 4513 if (!cfqd->busy_queues)
22e2c507 4514 goto out_cont;
22e2c507
JA
4515
4516 /*
4517 * not expired and it has a request pending, let it dispatch
4518 */
75e50984 4519 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
22e2c507 4520 goto out_kick;
76280aff
CZ
4521
4522 /*
 4523 * The queue depth (deep) flag is reset only when idling didn't succeed
4524 */
4525 cfq_clear_cfqq_deep(cfqq);
22e2c507
JA
4526 }
4527expire:
e5ff082e 4528 cfq_slice_expired(cfqd, timed_out);
22e2c507 4529out_kick:
23e018a1 4530 cfq_schedule_dispatch(cfqd);
22e2c507
JA
4531out_cont:
4532 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
91148325 4533 return HRTIMER_NORESTART;
22e2c507
JA
4534}
4535
3b18152c
JA
4536static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
4537{
91148325 4538 hrtimer_cancel(&cfqd->idle_slice_timer);
23e018a1 4539 cancel_work_sync(&cfqd->unplug_work);
3b18152c 4540}
22e2c507 4541
b374d18a 4542static void cfq_exit_queue(struct elevator_queue *e)
1da177e4 4543{
22e2c507 4544 struct cfq_data *cfqd = e->elevator_data;
165125e1 4545 struct request_queue *q = cfqd->queue;
22e2c507 4546
3b18152c 4547 cfq_shutdown_timer_wq(cfqd);
e2d74ac0 4548
d9ff4187 4549 spin_lock_irq(q->queue_lock);
e2d74ac0 4550
d9ff4187 4551 if (cfqd->active_queue)
e5ff082e 4552 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
e2d74ac0 4553
03aa264a
TH
4554 spin_unlock_irq(q->queue_lock);
4555
a90d742e
AV
4556 cfq_shutdown_timer_wq(cfqd);
4557
ffea73fc
TH
4558#ifdef CONFIG_CFQ_GROUP_IOSCHED
4559 blkcg_deactivate_policy(q, &blkcg_policy_cfq);
4560#else
f51b802c 4561 kfree(cfqd->root_group);
2abae55f 4562#endif
56edf7d7 4563 kfree(cfqd);
1da177e4
LT
4564}
4565
d50235b7 4566static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
1da177e4
LT
4567{
4568 struct cfq_data *cfqd;
3c798398 4569 struct blkcg_gq *blkg __maybe_unused;
a2b1693b 4570 int i, ret;
d50235b7
JM
4571 struct elevator_queue *eq;
4572
4573 eq = elevator_alloc(q, e);
4574 if (!eq)
4575 return -ENOMEM;
1da177e4 4576
c1b511eb 4577 cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
d50235b7
JM
4578 if (!cfqd) {
4579 kobject_put(&eq->kobj);
b2fab5ac 4580 return -ENOMEM;
d50235b7
JM
4581 }
4582 eq->elevator_data = cfqd;
80b15c73 4583
f51b802c 4584 cfqd->queue = q;
d50235b7
JM
4585 spin_lock_irq(q->queue_lock);
4586 q->elevator = eq;
4587 spin_unlock_irq(q->queue_lock);
f51b802c 4588
1fa8f6d6
VG
4589 /* Init root service tree */
4590 cfqd->grp_service_tree = CFQ_RB_ROOT;
4591
f51b802c 4592 /* Init root group and prefer root group over other groups by default */
25fb5169 4593#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4594 ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
a2b1693b
TH
4595 if (ret)
4596 goto out_free;
f51b802c 4597
a2b1693b 4598 cfqd->root_group = blkg_to_cfqg(q->root_blkg);
f51b802c 4599#else
a2b1693b 4600 ret = -ENOMEM;
f51b802c
TH
4601 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
4602 GFP_KERNEL, cfqd->queue->node);
a2b1693b
TH
4603 if (!cfqd->root_group)
4604 goto out_free;
5624a4e4 4605
a2b1693b 4606 cfq_init_cfqg_base(cfqd->root_group);
3ecca629
TH
4607 cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
4608 cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
69d7fde5 4609#endif
5624a4e4 4610
26a2ac00
JA
4611 /*
4612 * Not strictly needed (since RB_ROOT just clears the node and we
4613 * zeroed cfqd on alloc), but better be safe in case someone decides
4614 * to add magic to the rb code
4615 */
4616 for (i = 0; i < CFQ_PRIO_LISTS; i++)
4617 cfqd->prio_trees[i] = RB_ROOT;
4618
6118b70b 4619 /*
d4aad7ff 4620 * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
6118b70b 4621 * Grab a permanent reference to it, so that the normal code flow
f51b802c
TH
4622 * will not attempt to free it. oom_cfqq is linked to root_group
4623 * but shouldn't hold a reference as it'll never be unlinked. Lose
4624 * the reference from linking right away.
6118b70b
JA
4625 */
4626 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
30d7b944 4627 cfqd->oom_cfqq.ref++;
1adaf3dd
TH
4628
4629 spin_lock_irq(q->queue_lock);
f51b802c 4630 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
eb7d8c07 4631 cfqg_put(cfqd->root_group);
1adaf3dd 4632 spin_unlock_irq(q->queue_lock);
1da177e4 4633
91148325
JK
4634 hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC,
4635 HRTIMER_MODE_REL);
22e2c507 4636 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
22e2c507 4637
23e018a1 4638 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
22e2c507 4639
1da177e4 4640 cfqd->cfq_quantum = cfq_quantum;
22e2c507
JA
4641 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
4642 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
1da177e4
LT
4643 cfqd->cfq_back_max = cfq_back_max;
4644 cfqd->cfq_back_penalty = cfq_back_penalty;
22e2c507
JA
4645 cfqd->cfq_slice[0] = cfq_slice_async;
4646 cfqd->cfq_slice[1] = cfq_slice_sync;
5bf14c07 4647 cfqd->cfq_target_latency = cfq_target_latency;
22e2c507 4648 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
0bb97947 4649 cfqd->cfq_slice_idle = cfq_slice_idle;
80bdf0c7 4650 cfqd->cfq_group_idle = cfq_group_idle;
963b72fc 4651 cfqd->cfq_latency = 1;
e459dd08 4652 cfqd->hw_tag = -1;
edc71131
CZ
4653 /*
4654 * we optimistically start assuming sync ops weren't delayed in last
4655 * second, in order to have larger depth for async operations.
4656 */
9a7f38c4 4657 cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC;
b2fab5ac 4658 return 0;
a2b1693b
TH
4659
4660out_free:
4661 kfree(cfqd);
d50235b7 4662 kobject_put(&eq->kobj);
a2b1693b 4663 return ret;
1da177e4
LT
4664}
4665
0bb97947
JA
4666static void cfq_registered_queue(struct request_queue *q)
4667{
4668 struct elevator_queue *e = q->elevator;
4669 struct cfq_data *cfqd = e->elevator_data;
4670
4671 /*
4672 * Default to IOPS mode with no idling for SSDs
4673 */
4674 if (blk_queue_nonrot(q))
4675 cfqd->cfq_slice_idle = 0;
142bbdfc 4676 wbt_disable_default(q);
0bb97947
JA
4677}
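
/*
 * Note that this only changes the default: slice_idle (and slice_idle_us)
 * remain tunable through the sysfs attributes below, so idling can be
 * re-enabled on non-rotational devices if desired.  group_idle is left
 * untouched here and still applies.
 */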
4678
1da177e4
LT
4679/*
4680 * sysfs parts below -->
4681 */
1da177e4
LT
4682static ssize_t
4683cfq_var_show(unsigned int var, char *page)
4684{
176167ad 4685 return sprintf(page, "%u\n", var);
1da177e4
LT
4686}
4687
235f8da1 4688static void
4689cfq_var_store(unsigned int *var, const char *page)
1da177e4
LT
4690{
4691 char *p = (char *) page;
4692
4693 *var = simple_strtoul(p, &p, 10);
1da177e4
LT
4694}
4695
1da177e4 4696#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
b374d18a 4697static ssize_t __FUNC(struct elevator_queue *e, char *page) \
1da177e4 4698{ \
3d1ab40f 4699 struct cfq_data *cfqd = e->elevator_data; \
9a7f38c4 4700 u64 __data = __VAR; \
1da177e4 4701 if (__CONV) \
9a7f38c4 4702 __data = div_u64(__data, NSEC_PER_MSEC); \
1da177e4
LT
4703 return cfq_var_show(__data, (page)); \
4704}
4705SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
22e2c507
JA
4706SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
4707SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
e572ec7e
AV
4708SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
4709SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
22e2c507 4710SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
80bdf0c7 4711SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
22e2c507
JA
4712SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4713SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4714SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
963b72fc 4715SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
5bf14c07 4716SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
1da177e4
LT
4717#undef SHOW_FUNCTION
4718
d2d481d0
JM
4719#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
4720static ssize_t __FUNC(struct elevator_queue *e, char *page) \
4721{ \
4722 struct cfq_data *cfqd = e->elevator_data; \
4723 u64 __data = __VAR; \
4724 __data = div_u64(__data, NSEC_PER_USEC); \
4725 return cfq_var_show(__data, (page)); \
4726}
4727USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
4728USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
4729USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
4730USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
4731USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
4732#undef USEC_SHOW_FUNCTION
4733
1da177e4 4734#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
b374d18a 4735static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
1da177e4 4736{ \
3d1ab40f 4737 struct cfq_data *cfqd = e->elevator_data; \
1da177e4 4738 unsigned int __data; \
235f8da1 4739 cfq_var_store(&__data, (page)); \
1da177e4
LT
4740 if (__data < (MIN)) \
4741 __data = (MIN); \
4742 else if (__data > (MAX)) \
4743 __data = (MAX); \
4744 if (__CONV) \
9a7f38c4 4745 *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
1da177e4
LT
4746 else \
4747 *(__PTR) = __data; \
235f8da1 4748 return count; \
1da177e4
LT
4749}
4750STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
fe094d98
JA
4751STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4752 UINT_MAX, 1);
4753STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4754 UINT_MAX, 1);
e572ec7e 4755STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
fe094d98
JA
4756STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4757 UINT_MAX, 0);
22e2c507 4758STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
80bdf0c7 4759STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
22e2c507
JA
4760STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4761STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
fe094d98
JA
4762STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4763 UINT_MAX, 0);
963b72fc 4764STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
5bf14c07 4765STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
1da177e4
LT
4766#undef STORE_FUNCTION
4767
d2d481d0
JM
4768#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
4769static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4770{ \
4771 struct cfq_data *cfqd = e->elevator_data; \
4772 unsigned int __data; \
235f8da1 4773 cfq_var_store(&__data, (page)); \
d2d481d0
JM
4774 if (__data < (MIN)) \
4775 __data = (MIN); \
4776 else if (__data > (MAX)) \
4777 __data = (MAX); \
4778 *(__PTR) = (u64)__data * NSEC_PER_USEC; \
235f8da1 4779 return count; \
d2d481d0
JM
4780}
4781USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
4782USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
4783USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
4784USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
4785USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX);
4786#undef USEC_STORE_FUNCTION
4787
e572ec7e 4788#define CFQ_ATTR(name) \
5657a819 4789 __ATTR(name, 0644, cfq_##name##_show, cfq_##name##_store)
e572ec7e
AV
4790
4791static struct elv_fs_entry cfq_attrs[] = {
4792 CFQ_ATTR(quantum),
e572ec7e
AV
4793 CFQ_ATTR(fifo_expire_sync),
4794 CFQ_ATTR(fifo_expire_async),
4795 CFQ_ATTR(back_seek_max),
4796 CFQ_ATTR(back_seek_penalty),
4797 CFQ_ATTR(slice_sync),
d2d481d0 4798 CFQ_ATTR(slice_sync_us),
e572ec7e 4799 CFQ_ATTR(slice_async),
d2d481d0 4800 CFQ_ATTR(slice_async_us),
e572ec7e
AV
4801 CFQ_ATTR(slice_async_rq),
4802 CFQ_ATTR(slice_idle),
d2d481d0 4803 CFQ_ATTR(slice_idle_us),
80bdf0c7 4804 CFQ_ATTR(group_idle),
d2d481d0 4805 CFQ_ATTR(group_idle_us),
963b72fc 4806 CFQ_ATTR(low_latency),
5bf14c07 4807 CFQ_ATTR(target_latency),
d2d481d0 4808 CFQ_ATTR(target_latency_us),
e572ec7e 4809 __ATTR_NULL
1da177e4
LT
4810};
4811
1da177e4 4812static struct elevator_type iosched_cfq = {
c51ca6cf 4813 .ops.sq = {
1da177e4
LT
4814 .elevator_merge_fn = cfq_merge,
4815 .elevator_merged_fn = cfq_merged_request,
4816 .elevator_merge_req_fn = cfq_merged_requests,
72ef799b
TE
4817 .elevator_allow_bio_merge_fn = cfq_allow_bio_merge,
4818 .elevator_allow_rq_merge_fn = cfq_allow_rq_merge,
812d4026 4819 .elevator_bio_merged_fn = cfq_bio_merged,
b4878f24 4820 .elevator_dispatch_fn = cfq_dispatch_requests,
1da177e4 4821 .elevator_add_req_fn = cfq_insert_request,
b4878f24 4822 .elevator_activate_req_fn = cfq_activate_request,
1da177e4 4823 .elevator_deactivate_req_fn = cfq_deactivate_request,
1da177e4 4824 .elevator_completed_req_fn = cfq_completed_request,
21183b07
JA
4825 .elevator_former_req_fn = elv_rb_former_request,
4826 .elevator_latter_req_fn = elv_rb_latter_request,
9b84cacd 4827 .elevator_init_icq_fn = cfq_init_icq,
7e5a8794 4828 .elevator_exit_icq_fn = cfq_exit_icq,
1da177e4
LT
4829 .elevator_set_req_fn = cfq_set_request,
4830 .elevator_put_req_fn = cfq_put_request,
4831 .elevator_may_queue_fn = cfq_may_queue,
4832 .elevator_init_fn = cfq_init_queue,
4833 .elevator_exit_fn = cfq_exit_queue,
0bb97947 4834 .elevator_registered_fn = cfq_registered_queue,
1da177e4 4835 },
3d3c2379
TH
4836 .icq_size = sizeof(struct cfq_io_cq),
4837 .icq_align = __alignof__(struct cfq_io_cq),
3d1ab40f 4838 .elevator_attrs = cfq_attrs,
3d3c2379 4839 .elevator_name = "cfq",
1da177e4
LT
4840 .elevator_owner = THIS_MODULE,
4841};
4842
3e252066 4843#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4844static struct blkcg_policy blkcg_policy_cfq = {
2ee867dc 4845 .dfl_cftypes = cfq_blkcg_files,
880f50e2 4846 .legacy_cftypes = cfq_blkcg_legacy_files,
f9fcc2d3 4847
e4a9bde9 4848 .cpd_alloc_fn = cfq_cpd_alloc,
e48453c3 4849 .cpd_init_fn = cfq_cpd_init,
e4a9bde9 4850 .cpd_free_fn = cfq_cpd_free,
69d7fde5 4851 .cpd_bind_fn = cfq_cpd_bind,
e4a9bde9 4852
001bea73 4853 .pd_alloc_fn = cfq_pd_alloc,
f9fcc2d3 4854 .pd_init_fn = cfq_pd_init,
0b39920b 4855 .pd_offline_fn = cfq_pd_offline,
001bea73 4856 .pd_free_fn = cfq_pd_free,
f9fcc2d3 4857 .pd_reset_stats_fn = cfq_pd_reset_stats,
3e252066 4858};
3e252066
VG
4859#endif
4860
1da177e4
LT
4861static int __init cfq_init(void)
4862{
3d3c2379
TH
4863 int ret;
4864
80bdf0c7 4865#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4866 ret = blkcg_policy_register(&blkcg_policy_cfq);
8bd435b3
TH
4867 if (ret)
4868 return ret;
ffea73fc
TH
4869#else
4870 cfq_group_idle = 0;
4871#endif
8bd435b3 4872
fd794956 4873 ret = -ENOMEM;
3d3c2379
TH
4874 cfq_pool = KMEM_CACHE(cfq_queue, 0);
4875 if (!cfq_pool)
8bd435b3 4876 goto err_pol_unreg;
1da177e4 4877
3d3c2379 4878 ret = elv_register(&iosched_cfq);
8bd435b3
TH
4879 if (ret)
4880 goto err_free_pool;
3d3c2379 4881
2fdd82bd 4882 return 0;
8bd435b3
TH
4883
4884err_free_pool:
4885 kmem_cache_destroy(cfq_pool);
4886err_pol_unreg:
ffea73fc 4887#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4888 blkcg_policy_unregister(&blkcg_policy_cfq);
ffea73fc 4889#endif
8bd435b3 4890 return ret;
1da177e4
LT
4891}
4892
4893static void __exit cfq_exit(void)
4894{
ffea73fc 4895#ifdef CONFIG_CFQ_GROUP_IOSCHED
3c798398 4896 blkcg_policy_unregister(&blkcg_policy_cfq);
ffea73fc 4897#endif
1da177e4 4898 elv_unregister(&iosched_cfq);
3d3c2379 4899 kmem_cache_destroy(cfq_pool);
1da177e4
LT
4900}
4901
4902module_init(cfq_init);
4903module_exit(cfq_exit);
4904
4905MODULE_AUTHOR("Jens Axboe");
4906MODULE_LICENSE("GPL");
4907MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");