block, cfq: move io_cq exit/release to blk-ioc.c
[linux-2.6-block.git] / block / cfq-iosched.c
1/*
2 * CFQ, or complete fairness queueing, disk scheduler.
3 *
4 * Based on ideas from a previously unfinished io
5 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6 *
7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8 */
9#include <linux/module.h>
10#include <linux/slab.h>
11#include <linux/blkdev.h>
12#include <linux/elevator.h>
13#include <linux/jiffies.h>
14#include <linux/rbtree.h>
15#include <linux/ioprio.h>
16#include <linux/blktrace_api.h>
17#include "blk.h"
18#include "cfq.h"
19
20/*
21 * tunables
22 */
23/* max queue in one round of service */
24static const int cfq_quantum = 8;
25static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
26/* maximum backwards seek, in KiB */
27static const int cfq_back_max = 16 * 1024;
28/* penalty of a backwards seek */
29static const int cfq_back_penalty = 2;
30static const int cfq_slice_sync = HZ / 10;
31static int cfq_slice_async = HZ / 25;
32static const int cfq_slice_async_rq = 2;
33static int cfq_slice_idle = HZ / 125;
34static int cfq_group_idle = HZ / 125;
35static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
36static const int cfq_hist_divisor = 4;
37
38/*
39 * offset from end of service tree
40 */
41#define CFQ_IDLE_DELAY (HZ / 5)
42
43/*
44 * below this threshold, we consider thinktime immediate
45 */
46#define CFQ_MIN_TT (2)
47
48#define CFQ_SLICE_SCALE (5)
49#define CFQ_HW_QUEUE_MIN (5)
50#define CFQ_SERVICE_SHIFT 12
51
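/*
 * Seek thresholds and the seekiness test. seek_history is a 32-bit sliding
 * window of per-request seekiness bits, so CFQQ_SEEKY() reports a queue as
 * seeky once more than 32/8 of its last 32 tracked requests were flagged;
 * the distance and size thresholds below decide when a bit gets set.
 */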
52#define CFQQ_SEEK_THR (sector_t)(8 * 100)
53#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
54#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
55#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
56
57#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
58#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])
59#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])
60
61static struct kmem_cache *cfq_pool;
62static struct kmem_cache *cfq_icq_pool;
63
64#define CFQ_PRIO_LISTS IOPRIO_BE_NR
65#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
66#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
67
68#define sample_valid(samples) ((samples) > 80)
69#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
70
71struct cfq_ttime {
72 unsigned long last_end_request;
73
74 unsigned long ttime_total;
75 unsigned long ttime_samples;
76 unsigned long ttime_mean;
77};
78
79/*
80 * Most of our rbtree usage is for sorting with min extraction, so
81 * if we cache the leftmost node we don't have to walk down the tree
82 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
83 * move this into the elevator for the rq sorting as well.
84 */
85struct cfq_rb_root {
86 struct rb_root rb;
87 struct rb_node *left;
88 unsigned count;
89 unsigned total_weight;
90 u64 min_vdisktime;
91 struct cfq_ttime ttime;
92};
93#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \
94 .ttime = {.last_end_request = jiffies,},}
95
96/*
97 * Per process-grouping structure
98 */
99struct cfq_queue {
100 /* reference count */
101 int ref;
102 /* various state flags, see below */
103 unsigned int flags;
104 /* parent cfq_data */
105 struct cfq_data *cfqd;
106 /* service_tree member */
107 struct rb_node rb_node;
108 /* service_tree key */
109 unsigned long rb_key;
110 /* prio tree member */
111 struct rb_node p_node;
112 /* prio tree root we belong to, if any */
113 struct rb_root *p_root;
114 /* sorted list of pending requests */
115 struct rb_root sort_list;
116 /* if fifo isn't expired, next request to serve */
117 struct request *next_rq;
118 /* requests queued in sort_list */
119 int queued[2];
120 /* currently allocated requests */
121 int allocated[2];
122 /* fifo list of requests in sort_list */
123 struct list_head fifo;
124
125 /* time when queue got scheduled in to dispatch first request. */
126 unsigned long dispatch_start;
127 unsigned int allocated_slice;
128 unsigned int slice_dispatch;
129 /* time when first request from queue completed and slice started. */
130 unsigned long slice_start;
131 unsigned long slice_end;
132 long slice_resid;
133
134 /* pending priority requests */
135 int prio_pending;
136 /* number of requests that are on the dispatch list or inside driver */
137 int dispatched;
138
139 /* io prio of this group */
140 unsigned short ioprio, org_ioprio;
141 unsigned short ioprio_class;
142
143 pid_t pid;
144
145 u32 seek_history;
146 sector_t last_request_pos;
147
148 struct cfq_rb_root *service_tree;
149 struct cfq_queue *new_cfqq;
150 struct cfq_group *cfqg;
151 /* Number of sectors dispatched from queue in single dispatch round */
152 unsigned long nr_sectors;
153};
154
155/*
156 * First index in the service_trees.
157 * IDLE is handled separately, so it has negative index
158 */
159enum wl_prio_t {
160 BE_WORKLOAD = 0,
161 RT_WORKLOAD = 1,
162 IDLE_WORKLOAD = 2,
163 CFQ_PRIO_NR,
164};
165
166/*
167 * Second index in the service_trees.
168 */
169enum wl_type_t {
170 ASYNC_WORKLOAD = 0,
171 SYNC_NOIDLE_WORKLOAD = 1,
172 SYNC_WORKLOAD = 2
173};
174
175/* This is per cgroup per device grouping structure */
176struct cfq_group {
177 /* group service_tree member */
178 struct rb_node rb_node;
179
180 /* group service_tree key */
181 u64 vdisktime;
182 unsigned int weight;
183 unsigned int new_weight;
184 bool needs_update;
185
186 /* number of cfqq currently on this group */
187 int nr_cfqq;
188
189 /*
190 * Per-group busy queues average. Useful for workload slice calc. We
191 * create the array for each prio class, but at run time it is used
192 * only for the RT and BE classes; the slot for the IDLE class remains unused.
193 * This is primarily done to avoid confusion and a gcc warning.
194 */
195 unsigned int busy_queues_avg[CFQ_PRIO_NR];
196 /*
197 * rr lists of queues with requests. We maintain service trees for
198 * RT and BE classes. These trees are subdivided in subclasses
199 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
200 * class there is no subclassification and all the cfq queues go on
201 * a single tree service_tree_idle.
202 * Counts are embedded in the cfq_rb_root
203 */
204 struct cfq_rb_root service_trees[2][3];
205 struct cfq_rb_root service_tree_idle;
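	/*
	 * For example, a sync BE queue that may idle is kept on
	 * service_trees[BE_WORKLOAD][SYNC_WORKLOAD], while every IDLE class
	 * queue, sync or async, goes on service_tree_idle.
	 */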
206
207 unsigned long saved_workload_slice;
208 enum wl_type_t saved_workload;
209 enum wl_prio_t saved_serving_prio;
210 struct blkio_group blkg;
211#ifdef CONFIG_CFQ_GROUP_IOSCHED
212 struct hlist_node cfqd_node;
213 int ref;
214#endif
215 /* number of requests that are on the dispatch list or inside driver */
216 int dispatched;
217 struct cfq_ttime ttime;
218};
219
220struct cfq_io_cq {
221 struct io_cq icq; /* must be the first member */
222 struct cfq_queue *cfqq[2];
223 struct cfq_ttime ttime;
224};
225
226/*
227 * Per block device queue structure
228 */
229struct cfq_data {
230 struct request_queue *queue;
231 /* Root service tree for cfq_groups */
232 struct cfq_rb_root grp_service_tree;
233 struct cfq_group root_group;
234
235 /*
236 * The priority currently being served
237 */
238 enum wl_prio_t serving_prio;
239 enum wl_type_t serving_type;
240 unsigned long workload_expires;
241 struct cfq_group *serving_group;
242
243 /*
244 * Each priority tree is sorted by next_request position. These
245 * trees are used when determining if two or more queues are
246 * interleaving requests (see cfq_close_cooperator).
247 */
248 struct rb_root prio_trees[CFQ_PRIO_LISTS];
249
250 unsigned int busy_queues;
251 unsigned int busy_sync_queues;
252
253 int rq_in_driver;
254 int rq_in_flight[2];
255
256 /*
257 * queue-depth detection
258 */
259 int rq_queued;
260 int hw_tag;
261 /*
262 * hw_tag can be
263 * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
264 * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
265 * 0 => no NCQ
266 */
267 int hw_tag_est_depth;
268 unsigned int hw_tag_samples;
269
270 /*
271 * idle window management
272 */
273 struct timer_list idle_slice_timer;
274 struct work_struct unplug_work;
275
276 struct cfq_queue *active_queue;
277 struct cfq_io_cq *active_cic;
278
279 /*
280 * async queue for each priority case
281 */
282 struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
283 struct cfq_queue *async_idle_cfqq;
284
285 sector_t last_position;
286
287 /*
288 * tunables, see top of file
289 */
290 unsigned int cfq_quantum;
291 unsigned int cfq_fifo_expire[2];
292 unsigned int cfq_back_penalty;
293 unsigned int cfq_back_max;
294 unsigned int cfq_slice[2];
295 unsigned int cfq_slice_async_rq;
296 unsigned int cfq_slice_idle;
297 unsigned int cfq_group_idle;
298 unsigned int cfq_latency;
299
300 /*
301 * Fallback dummy cfqq for extreme OOM conditions
302 */
303 struct cfq_queue oom_cfqq;
304
305 unsigned long last_delayed_sync;
306
307 /* List of cfq groups being managed on this device*/
308 struct hlist_head cfqg_list;
309
310 /* Number of groups which are on blkcg->blkg_list */
311 unsigned int nr_blkcg_linked_grps;
312};
313
314static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
315
316static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
317 enum wl_prio_t prio,
318 enum wl_type_t type)
319{
320 if (!cfqg)
321 return NULL;
322
323 if (prio == IDLE_WORKLOAD)
324 return &cfqg->service_tree_idle;
325
326 return &cfqg->service_trees[prio][type];
327}
328
329enum cfqq_state_flags {
330 CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
331 CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
332 CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
333 CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
334 CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
335 CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
336 CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
337 CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
338 CFQ_CFQQ_FLAG_sync, /* synchronous queue */
339 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
340 CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be split */
341 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
342 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
343};
344
345#define CFQ_CFQQ_FNS(name) \
346static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
347{ \
348 (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
349} \
350static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
351{ \
352 (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
353} \
354static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
355{ \
356 return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
357}
358
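/*
 * Each CFQ_CFQQ_FNS(name) invocation below generates three helpers:
 * cfq_mark_cfqq_<name>(), cfq_clear_cfqq_<name>() and cfq_cfqq_<name>(),
 * which set, clear and test the matching CFQ_CFQQ_FLAG_<name> bit in
 * cfqq->flags.
 */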
359CFQ_CFQQ_FNS(on_rr);
360CFQ_CFQQ_FNS(wait_request);
361CFQ_CFQQ_FNS(must_dispatch);
362CFQ_CFQQ_FNS(must_alloc_slice);
363CFQ_CFQQ_FNS(fifo_expire);
364CFQ_CFQQ_FNS(idle_window);
365CFQ_CFQQ_FNS(prio_changed);
366CFQ_CFQQ_FNS(slice_new);
367CFQ_CFQQ_FNS(sync);
368CFQ_CFQQ_FNS(coop);
369CFQ_CFQQ_FNS(split_coop);
370CFQ_CFQQ_FNS(deep);
371CFQ_CFQQ_FNS(wait_busy);
372#undef CFQ_CFQQ_FNS
373
374#ifdef CONFIG_CFQ_GROUP_IOSCHED
375#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
376 blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
377 cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
378 blkg_path(&(cfqq)->cfqg->blkg), ##args)
379
380#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
381 blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
382 blkg_path(&(cfqg)->blkg), ##args) \
383
384#else
385#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
386 blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
387#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
388#endif
389#define cfq_log(cfqd, fmt, args...) \
390 blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
391
392/* Traverses through cfq group service trees */
393#define for_each_cfqg_st(cfqg, i, j, st) \
394 for (i = 0; i <= IDLE_WORKLOAD; i++) \
395 for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
396 : &cfqg->service_tree_idle; \
397 (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
398 (i == IDLE_WORKLOAD && j == 0); \
399 j++, st = i < IDLE_WORKLOAD ? \
400 &cfqg->service_trees[i][j]: NULL) \
401
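/*
 * The loop above visits all seven trees of a group: the 2x3
 * service_trees[prio][type] entries for the RT and BE classes, followed by
 * the single service_tree_idle for the IDLE class.
 */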
402static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
403 struct cfq_ttime *ttime, bool group_idle)
404{
405 unsigned long slice;
406 if (!sample_valid(ttime->ttime_samples))
407 return false;
408 if (group_idle)
409 slice = cfqd->cfq_group_idle;
410 else
411 slice = cfqd->cfq_slice_idle;
412 return ttime->ttime_mean > slice;
413}
414
415static inline bool iops_mode(struct cfq_data *cfqd)
416{
417 /*
418 * If we are not idling on queues and the drive supports NCQ, requests
419 * execute in parallel and measuring time is not meaningful in most
420 * cases, unless we drive shallower queue depths, which itself becomes
421 * a performance bottleneck. In such cases switch to providing
422 * fairness in terms of number of IOs.
423 */
424 if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
425 return true;
426 else
427 return false;
428}
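/*
 * In iops mode, queues are charged by the number of requests dispatched in
 * their slice rather than by elapsed time; see the charge calculation in
 * cfq_group_served() below.
 */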
429
430static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
431{
432 if (cfq_class_idle(cfqq))
433 return IDLE_WORKLOAD;
434 if (cfq_class_rt(cfqq))
435 return RT_WORKLOAD;
436 return BE_WORKLOAD;
437}
438
439
440static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
441{
442 if (!cfq_cfqq_sync(cfqq))
443 return ASYNC_WORKLOAD;
444 if (!cfq_cfqq_idle_window(cfqq))
445 return SYNC_NOIDLE_WORKLOAD;
446 return SYNC_WORKLOAD;
447}
448
449static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
450 struct cfq_data *cfqd,
451 struct cfq_group *cfqg)
452{
453 if (wl == IDLE_WORKLOAD)
454 return cfqg->service_tree_idle.count;
455
456 return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
457 + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
458 + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
459}
460
461static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
462 struct cfq_group *cfqg)
463{
464 return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
465 + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
466}
467
468static void cfq_dispatch_insert(struct request_queue *, struct request *);
469static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
470 struct io_context *, gfp_t);
471
472static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
473{
474 /* cic->icq is the first member, %NULL will convert to %NULL */
475 return container_of(icq, struct cfq_io_cq, icq);
476}
477
478static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
479 struct io_context *ioc)
480{
481 if (ioc)
482 return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
483 return NULL;
484}
485
486static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
487{
488 return cic->cfqq[is_sync];
489}
490
491static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
492 bool is_sync)
493{
494 cic->cfqq[is_sync] = cfqq;
495}
496
497static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
498{
499 return cic->icq.q->elevator->elevator_data;
500}
501
502/*
503 * We regard a request as SYNC if it's either a read or has the SYNC bit
504 * set (in which case it could also be a direct WRITE).
505 */
506static inline bool cfq_bio_sync(struct bio *bio)
507{
508 return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
509}
510
511/*
512 * Schedule a run of the queue if there are requests pending and no one in
513 * the driver will restart queueing.
514 */
515static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
516{
517 if (cfqd->busy_queues) {
518 cfq_log(cfqd, "schedule dispatch");
519 kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
520 }
521}
522
523/*
524 * Scale schedule slice based on io priority. Use the sync time slice only
525 * if a queue is marked sync and has sync io queued. A sync queue with async
526 * io only should not get the full sync slice length.
527 */
528static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
529 unsigned short prio)
530{
531 const int base_slice = cfqd->cfq_slice[sync];
532
533 WARN_ON(prio >= IOPRIO_BE_NR);
534
535 return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
536}
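/*
 * Worked example, assuming HZ=1000 and the default tunables above: the sync
 * base slice is cfq_slice_sync = HZ/10 = 100ms and CFQ_SLICE_SCALE is 5, so
 * an ioprio 0 queue gets 100 + (100/5) * (4 - 0) = 180ms, ioprio 4 gets the
 * plain 100ms, and ioprio 7 gets 100 + 20 * (4 - 7) = 40ms.
 */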
537
538static inline int
539cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
540{
541 return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
542}
543
544static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
545{
546 u64 d = delta << CFQ_SERVICE_SHIFT;
547
548 d = d * BLKIO_WEIGHT_DEFAULT;
549 do_div(d, cfqg->weight);
550 return d;
551}
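/*
 * cfq_scale_slice() turns a service charge into vdisktime by scaling it with
 * BLKIO_WEIGHT_DEFAULT / weight. A group at half the default weight therefore
 * accumulates vdisktime twice as fast for the same service, which is what
 * pushes it further to the right in the group service tree.
 */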
552
553static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
554{
555 s64 delta = (s64)(vdisktime - min_vdisktime);
556 if (delta > 0)
557 min_vdisktime = vdisktime;
558
559 return min_vdisktime;
560}
561
562static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
563{
564 s64 delta = (s64)(vdisktime - min_vdisktime);
565 if (delta < 0)
566 min_vdisktime = vdisktime;
567
568 return min_vdisktime;
569}
570
571static void update_min_vdisktime(struct cfq_rb_root *st)
572{
573 struct cfq_group *cfqg;
574
575 if (st->left) {
576 cfqg = rb_entry_cfqg(st->left);
577 st->min_vdisktime = max_vdisktime(st->min_vdisktime,
578 cfqg->vdisktime);
579 }
580}
581
582/*
583 * Get the averaged number of queues of RT/BE priority.
584 * The average is updated with a formula that gives more weight to higher numbers,
585 * so that it quickly follows sudden increases and decreases slowly.
586 */
587
588static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
589 struct cfq_group *cfqg, bool rt)
590{
591 unsigned min_q, max_q;
592 unsigned mult = cfq_hist_divisor - 1;
593 unsigned round = cfq_hist_divisor / 2;
594 unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
595
596 min_q = min(cfqg->busy_queues_avg[rt], busy);
597 max_q = max(cfqg->busy_queues_avg[rt], busy);
598 cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
599 cfq_hist_divisor;
600 return cfqg->busy_queues_avg[rt];
601}
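/*
 * With the default cfq_hist_divisor of 4 the update above is
 * (3 * max(avg, busy) + min(avg, busy) + 2) / 4: a jump in the busy count
 * moves the average three quarters of the way up immediately, while a drop
 * only moves it about a quarter of the way down per update.
 */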
602
603static inline unsigned
604cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
605{
606 struct cfq_rb_root *st = &cfqd->grp_service_tree;
607
608 return cfq_target_latency * cfqg->weight / st->total_weight;
609}
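/*
 * For example, a group holding weight 500 out of a total weight of 1500 gets
 * 500/1500 of cfq_target_latency as its group slice, i.e. roughly 100ms of
 * the 300ms target round at HZ=1000.
 */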
610
611static inline unsigned
612cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
613{
614 unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
615 if (cfqd->cfq_latency) {
616 /*
617 * interested queues (we consider only the ones with the same
618 * priority class in the cfq group)
619 */
620 unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
621 cfq_class_rt(cfqq));
622 unsigned sync_slice = cfqd->cfq_slice[1];
623 unsigned expect_latency = sync_slice * iq;
624 unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
625
626 if (expect_latency > group_slice) {
627 unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
628 /* scale low_slice according to IO priority
629 * and sync vs async */
630 unsigned low_slice =
631 min(slice, base_low_slice * slice / sync_slice);
632 /* the adapted slice value is scaled to fit all iqs
633 * into the target latency */
634 slice = max(slice * group_slice / expect_latency,
635 low_slice);
636 }
637 }
638 return slice;
639}
640
641static inline void
642cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
643{
644 unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
645
646 cfqq->slice_start = jiffies;
647 cfqq->slice_end = jiffies + slice;
648 cfqq->allocated_slice = slice;
649 cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
650}
651
652/*
653 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
654 * isn't valid until the first request from the dispatch is activated
655 * and the slice time set.
656 */
657static inline bool cfq_slice_used(struct cfq_queue *cfqq)
658{
659 if (cfq_cfqq_slice_new(cfqq))
660 return false;
661 if (time_before(jiffies, cfqq->slice_end))
662 return false;
663
664 return true;
665}
666
667/*
668 * Lifted from AS - choose which of rq1 and rq2 is best served now.
669 * We choose the request that is closest to the head right now. Distance
670 * behind the head is penalized and only allowed to a certain extent.
671 */
672static struct request *
673cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
674{
675 sector_t s1, s2, d1 = 0, d2 = 0;
676 unsigned long back_max;
677#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
678#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
679 unsigned wrap = 0; /* bit mask: requests behind the disk head? */
680
681 if (rq1 == NULL || rq1 == rq2)
682 return rq2;
683 if (rq2 == NULL)
684 return rq1;
685
686 if (rq_is_sync(rq1) != rq_is_sync(rq2))
687 return rq_is_sync(rq1) ? rq1 : rq2;
688
689 if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
690 return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
691
692 s1 = blk_rq_pos(rq1);
693 s2 = blk_rq_pos(rq2);
694
695 /*
696 * by definition, 1KiB is 2 sectors
697 */
698 back_max = cfqd->cfq_back_max * 2;
699
700 /*
701 * Strict one way elevator _except_ in the case where we allow
702 * short backward seeks which are biased as twice the cost of a
703 * similar forward seek.
704 */
705 if (s1 >= last)
706 d1 = s1 - last;
707 else if (s1 + back_max >= last)
708 d1 = (last - s1) * cfqd->cfq_back_penalty;
709 else
710 wrap |= CFQ_RQ1_WRAP;
711
712 if (s2 >= last)
713 d2 = s2 - last;
714 else if (s2 + back_max >= last)
715 d2 = (last - s2) * cfqd->cfq_back_penalty;
716 else
717 wrap |= CFQ_RQ2_WRAP;
718
719 /* Found required data */
720
721 /*
722 * By doing switch() on the bit mask "wrap" we avoid having to
723 * check two variables for all permutations: --> faster!
724 */
725 switch (wrap) {
726 case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
727 if (d1 < d2)
728 return rq1;
729 else if (d2 < d1)
730 return rq2;
731 else {
732 if (s1 >= s2)
733 return rq1;
734 else
735 return rq2;
736 }
737
738 case CFQ_RQ2_WRAP:
739 return rq1;
740 case CFQ_RQ1_WRAP:
741 return rq2;
742 case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
743 default:
744 /*
745 * Since both rqs are wrapped,
746 * start with the one that's further behind head
747 * (--> only *one* back seek required),
748 * since back seek takes more time than forward.
749 */
750 if (s1 <= s2)
751 return rq1;
752 else
753 return rq2;
754 }
755}
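/*
 * Illustration of the distance logic above, assuming default tunables and two
 * otherwise equivalent requests (same sync/prio class): with the head at
 * sector 1000, rq1 at 1100 and rq2 at 980, d1 = 100 while d2 = (1000 - 980) *
 * cfq_back_penalty = 40, so the short backward seek to rq2 still wins over
 * the longer forward one.
 */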
756
757/*
758 * Below is the leftmost-cache rbtree addon.
759 */
760static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
761{
762 /* Service tree is empty */
763 if (!root->count)
764 return NULL;
765
766 if (!root->left)
767 root->left = rb_first(&root->rb);
768
769 if (root->left)
770 return rb_entry(root->left, struct cfq_queue, rb_node);
771
772 return NULL;
773}
774
775static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
776{
777 if (!root->left)
778 root->left = rb_first(&root->rb);
779
780 if (root->left)
781 return rb_entry_cfqg(root->left);
782
783 return NULL;
784}
785
786static void rb_erase_init(struct rb_node *n, struct rb_root *root)
787{
788 rb_erase(n, root);
789 RB_CLEAR_NODE(n);
790}
791
792static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
793{
794 if (root->left == n)
795 root->left = NULL;
796 rb_erase_init(n, &root->rb);
797 --root->count;
798}
799
800/*
801 * would be nice to take fifo expire time into account as well
802 */
803static struct request *
804cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
805 struct request *last)
806{
807 struct rb_node *rbnext = rb_next(&last->rb_node);
808 struct rb_node *rbprev = rb_prev(&last->rb_node);
809 struct request *next = NULL, *prev = NULL;
810
811 BUG_ON(RB_EMPTY_NODE(&last->rb_node));
812
813 if (rbprev)
814 prev = rb_entry_rq(rbprev);
815
816 if (rbnext)
817 next = rb_entry_rq(rbnext);
818 else {
819 rbnext = rb_first(&cfqq->sort_list);
820 if (rbnext && rbnext != &last->rb_node)
821 next = rb_entry_rq(rbnext);
822 }
823
824 return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
825}
826
827static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
828 struct cfq_queue *cfqq)
829{
830 /*
831 * just an approximation, should be ok.
832 */
833 return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
834 cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
835}
836
837static inline s64
838cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
839{
840 return cfqg->vdisktime - st->min_vdisktime;
841}
842
843static void
844__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
845{
846 struct rb_node **node = &st->rb.rb_node;
847 struct rb_node *parent = NULL;
848 struct cfq_group *__cfqg;
849 s64 key = cfqg_key(st, cfqg);
850 int left = 1;
851
852 while (*node != NULL) {
853 parent = *node;
854 __cfqg = rb_entry_cfqg(parent);
855
856 if (key < cfqg_key(st, __cfqg))
857 node = &parent->rb_left;
858 else {
859 node = &parent->rb_right;
860 left = 0;
861 }
862 }
863
864 if (left)
865 st->left = &cfqg->rb_node;
866
867 rb_link_node(&cfqg->rb_node, parent, node);
868 rb_insert_color(&cfqg->rb_node, &st->rb);
869}
870
871static void
872cfq_update_group_weight(struct cfq_group *cfqg)
873{
874 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
875 if (cfqg->needs_update) {
876 cfqg->weight = cfqg->new_weight;
877 cfqg->needs_update = false;
878 }
879}
880
881static void
882cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
883{
884 BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
885
886 cfq_update_group_weight(cfqg);
887 __cfq_group_service_tree_add(st, cfqg);
888 st->total_weight += cfqg->weight;
889}
890
891static void
892cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
893{
894 struct cfq_rb_root *st = &cfqd->grp_service_tree;
895 struct cfq_group *__cfqg;
896 struct rb_node *n;
897
898 cfqg->nr_cfqq++;
899 if (!RB_EMPTY_NODE(&cfqg->rb_node))
900 return;
901
902 /*
903 * Currently put the group at the end. Later implement something
904 * so that groups get lesser vtime based on their weights, so that
905 * a group does not lose all of its share if it was not continuously backlogged.
906 */
907 n = rb_last(&st->rb);
908 if (n) {
909 __cfqg = rb_entry_cfqg(n);
910 cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
911 } else
912 cfqg->vdisktime = st->min_vdisktime;
913 cfq_group_service_tree_add(st, cfqg);
914}
915
916static void
917cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
918{
919 st->total_weight -= cfqg->weight;
920 if (!RB_EMPTY_NODE(&cfqg->rb_node))
921 cfq_rb_erase(&cfqg->rb_node, st);
922}
923
924static void
925cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
926{
927 struct cfq_rb_root *st = &cfqd->grp_service_tree;
928
929 BUG_ON(cfqg->nr_cfqq < 1);
930 cfqg->nr_cfqq--;
931
932 /* If there are other cfq queues under this group, don't delete it */
933 if (cfqg->nr_cfqq)
934 return;
935
936 cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
937 cfq_group_service_tree_del(st, cfqg);
938 cfqg->saved_workload_slice = 0;
939 cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
940}
941
942static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
943 unsigned int *unaccounted_time)
944{
945 unsigned int slice_used;
946
947 /*
948 * Queue got expired before even a single request completed or
949 * got expired immediately after first request completion.
950 */
951 if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
952 /*
953 * Also charge the seek time incurred to the group, otherwise
954 * if there are multiple queues in the group, each can dispatch
955 * a single request on seeky media and cause lots of seek time
956 * and the group will never know it.
957 */
958 slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
959 1);
960 } else {
961 slice_used = jiffies - cfqq->slice_start;
962 if (slice_used > cfqq->allocated_slice) {
963 *unaccounted_time = slice_used - cfqq->allocated_slice;
964 slice_used = cfqq->allocated_slice;
965 }
966 if (time_after(cfqq->slice_start, cfqq->dispatch_start))
967 *unaccounted_time += cfqq->slice_start -
968 cfqq->dispatch_start;
969 }
970
971 return slice_used;
972}
973
974static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
975 struct cfq_queue *cfqq)
976{
977 struct cfq_rb_root *st = &cfqd->grp_service_tree;
978 unsigned int used_sl, charge, unaccounted_sl = 0;
979 int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
980 - cfqg->service_tree_idle.count;
981
982 BUG_ON(nr_sync < 0);
983 used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
984
985 if (iops_mode(cfqd))
986 charge = cfqq->slice_dispatch;
987 else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
988 charge = cfqq->allocated_slice;
989
990 /* Can't update vdisktime while group is on service tree */
991 cfq_group_service_tree_del(st, cfqg);
992 cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
993 /* If a new weight was requested, update now, off tree */
994 cfq_group_service_tree_add(st, cfqg);
995
996 /* This group is being expired. Save the context */
997 if (time_after(cfqd->workload_expires, jiffies)) {
998 cfqg->saved_workload_slice = cfqd->workload_expires
999 - jiffies;
1000 cfqg->saved_workload = cfqd->serving_type;
1001 cfqg->saved_serving_prio = cfqd->serving_prio;
1002 } else
1003 cfqg->saved_workload_slice = 0;
1004
1005 cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
1006 st->min_vdisktime);
1007 cfq_log_cfqq(cfqq->cfqd, cfqq,
1008 "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
1009 used_sl, cfqq->slice_dispatch, charge,
1010 iops_mode(cfqd), cfqq->nr_sectors);
1011 cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
1012 unaccounted_sl);
1013 cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
1014}
1015
1016#ifdef CONFIG_CFQ_GROUP_IOSCHED
1017static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
1018{
1019 if (blkg)
1020 return container_of(blkg, struct cfq_group, blkg);
1021 return NULL;
1022}
1023
1024static void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
1025 unsigned int weight)
1026{
1027 struct cfq_group *cfqg = cfqg_of_blkg(blkg);
1028 cfqg->new_weight = weight;
1029 cfqg->needs_update = true;
1030}
1031
1032static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
1033 struct cfq_group *cfqg, struct blkio_cgroup *blkcg)
1034{
1035 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1036 unsigned int major, minor;
1037
1038 /*
1039 * Add group onto cgroup list. It might happen that bdi->dev is
1040 * not initialized yet. Initialize this new group without major
1041 * and minor info and this info will be filled in once a new thread
1042 * comes for IO.
1043 */
1044 if (bdi->dev) {
1045 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1046 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1047 (void *)cfqd, MKDEV(major, minor));
1048 } else
1049 cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg,
1050 (void *)cfqd, 0);
1051
1052 cfqd->nr_blkcg_linked_grps++;
1053 cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1054
1055 /* Add group on cfqd list */
1056 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1057}
1058
1059/*
1060 * Should be called from sleepable context. No request queue lock is held, as
1061 * per-cpu stats are allocated dynamically and alloc_percpu needs to be called
1062 * from sleepable context.
1063 */
1064static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
1065{
1066 struct cfq_group *cfqg = NULL;
1067 int i, j, ret;
1068 struct cfq_rb_root *st;
1069
1070 cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1071 if (!cfqg)
1072 return NULL;
1073
1074 for_each_cfqg_st(cfqg, i, j, st)
1075 *st = CFQ_RB_ROOT;
1076 RB_CLEAR_NODE(&cfqg->rb_node);
1077
1078 cfqg->ttime.last_end_request = jiffies;
1079
1080 /*
1081 * Take the initial reference that will be released on destroy.
1082 * This can be thought of as a joint reference by cgroup and
1083 * elevator which will be dropped by either elevator exit
1084 * or cgroup deletion path, depending on who is exiting first.
1085 */
1086 cfqg->ref = 1;
1087
1088 ret = blkio_alloc_blkg_stats(&cfqg->blkg);
1089 if (ret) {
1090 kfree(cfqg);
1091 return NULL;
1092 }
1093
1094 return cfqg;
1095}
1096
1097static struct cfq_group *
1098cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
1099{
1100 struct cfq_group *cfqg = NULL;
1101 void *key = cfqd;
1102 struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1103 unsigned int major, minor;
1104
1105 /*
1106 * This is the common case when there are no blkio cgroups.
1107 * Avoid lookup in this case
1108 */
1109 if (blkcg == &blkio_root_cgroup)
1110 cfqg = &cfqd->root_group;
1111 else
1112 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
1113
1114 if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1115 sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1116 cfqg->blkg.dev = MKDEV(major, minor);
1117 }
1118
1119 return cfqg;
1120}
1121
1122/*
1123 * Search for the cfq group the current task belongs to. request_queue lock must
1124 * be held.
1125 */
1126static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1127{
1128 struct blkio_cgroup *blkcg;
1129 struct cfq_group *cfqg = NULL, *__cfqg = NULL;
1130 struct request_queue *q = cfqd->queue;
1131
1132 rcu_read_lock();
1133 blkcg = task_blkio_cgroup(current);
1134 cfqg = cfq_find_cfqg(cfqd, blkcg);
1135 if (cfqg) {
1136 rcu_read_unlock();
1137 return cfqg;
1138 }
1139
1140 /*
1141 * Need to allocate a group. Allocation of a group also needs allocation
1142 * of per-cpu stats which in turn takes a mutex() and can block. Hence
1143 * we need to drop rcu lock and queue_lock before we call alloc.
1144 *
1145 * Not taking any queue reference here and assuming that queue is
1146 * around by the time we return. CFQ queue allocation code does
1147 * the same. It might be racy though.
1148 */
1149
1150 rcu_read_unlock();
1151 spin_unlock_irq(q->queue_lock);
1152
1153 cfqg = cfq_alloc_cfqg(cfqd);
1154
1155 spin_lock_irq(q->queue_lock);
1156
1157 rcu_read_lock();
1158 blkcg = task_blkio_cgroup(current);
1159
1160 /*
1161 * If some other thread already allocated the group while we were
1162 * not holding queue lock, free up the group
1163 */
1164 __cfqg = cfq_find_cfqg(cfqd, blkcg);
1165
1166 if (__cfqg) {
1167 kfree(cfqg);
1168 rcu_read_unlock();
1169 return __cfqg;
1170 }
1171
1172 if (!cfqg)
1173 cfqg = &cfqd->root_group;
1174
1175 cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
1176 rcu_read_unlock();
1177 return cfqg;
1178}
1179
1180static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1181{
1182 cfqg->ref++;
1183 return cfqg;
1184}
1185
1186static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1187{
1188 /* Currently, all async queues are mapped to root group */
1189 if (!cfq_cfqq_sync(cfqq))
1190 cfqg = &cfqq->cfqd->root_group;
1191
1192 cfqq->cfqg = cfqg;
1193 /* cfqq reference on cfqg */
1194 cfqq->cfqg->ref++;
1195}
1196
1197static void cfq_put_cfqg(struct cfq_group *cfqg)
1198{
1199 struct cfq_rb_root *st;
1200 int i, j;
1201
1202 BUG_ON(cfqg->ref <= 0);
1203 cfqg->ref--;
1204 if (cfqg->ref)
1205 return;
1206 for_each_cfqg_st(cfqg, i, j, st)
1207 BUG_ON(!RB_EMPTY_ROOT(&st->rb));
1208 free_percpu(cfqg->blkg.stats_cpu);
1209 kfree(cfqg);
1210}
1211
1212static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1213{
1214 /* Something wrong if we are trying to remove same group twice */
1215 BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1216
1217 hlist_del_init(&cfqg->cfqd_node);
1218
1219 BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
1220 cfqd->nr_blkcg_linked_grps--;
1221
1222 /*
1223 * Put the reference taken at the time of creation so that when all
1224 * queues are gone, group can be destroyed.
1225 */
1226 cfq_put_cfqg(cfqg);
1227}
1228
1229static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1230{
1231 struct hlist_node *pos, *n;
1232 struct cfq_group *cfqg;
1233
1234 hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1235 /*
1236 * If cgroup removal path got to blk_group first and removed
1237 * it from cgroup list, then it will take care of destroying
1238 * cfqg also.
1239 */
1240 if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
1241 cfq_destroy_cfqg(cfqd, cfqg);
1242 }
1243}
1244
1245/*
1246 * Blk cgroup controller notification saying that blkio_group object is being
1247 * delinked as associated cgroup object is going away. That also means that
1248 * no new IO will come in this group. So get rid of this group as soon as
1249 * any pending IO in the group is finished.
1250 *
1251 * This function is called under rcu_read_lock(). key is the rcu protected
1252 * pointer. That means "key" is a valid cfq_data pointer as long as we hold the
1253 * rcu read lock.
1254 *
1255 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1256 * it should not be NULL as, even if the elevator was exiting, the cgroup
1257 * deletion path got to it first.
1258 */
1259static void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1260{
1261 unsigned long flags;
1262 struct cfq_data *cfqd = key;
1263
1264 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1265 cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1266 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1267}
1268
1269#else /* GROUP_IOSCHED */
1270static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd)
1271{
1272 return &cfqd->root_group;
1273}
1274
1275static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1276{
1277 return cfqg;
1278}
1279
1280static inline void
1281cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1282 cfqq->cfqg = cfqg;
1283}
1284
1285static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1286static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1287
1288#endif /* GROUP_IOSCHED */
1289
1290/*
1291 * The cfqd->service_trees hold all pending cfq_queues that have
1292 * requests waiting to be processed. It is sorted in the order that
1293 * we will service the queues.
1294 */
1295static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1296 bool add_front)
1297{
1298 struct rb_node **p, *parent;
1299 struct cfq_queue *__cfqq;
1300 unsigned long rb_key;
1301 struct cfq_rb_root *service_tree;
1302 int left;
1303 int new_cfqq = 1;
1304
1305 service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1306 cfqq_type(cfqq));
1307 if (cfq_class_idle(cfqq)) {
1308 rb_key = CFQ_IDLE_DELAY;
1309 parent = rb_last(&service_tree->rb);
1310 if (parent && parent != &cfqq->rb_node) {
1311 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1312 rb_key += __cfqq->rb_key;
1313 } else
1314 rb_key += jiffies;
1315 } else if (!add_front) {
1316 /*
1317 * Get our rb key offset. Subtract any residual slice
1318 * value carried from last service. A negative resid
1319 * count indicates slice overrun, and this should position
1320 * the next service time further away in the tree.
1321 */
1322 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1323 rb_key -= cfqq->slice_resid;
1324 cfqq->slice_resid = 0;
1325 } else {
1326 rb_key = -HZ;
1327 __cfqq = cfq_rb_first(service_tree);
1328 rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1329 }
1330
1331 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1332 new_cfqq = 0;
1333 /*
1334 * same position, nothing more to do
1335 */
1336 if (rb_key == cfqq->rb_key &&
1337 cfqq->service_tree == service_tree)
1338 return;
1339
1340 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1341 cfqq->service_tree = NULL;
1342 }
1343
1344 left = 1;
1345 parent = NULL;
1346 cfqq->service_tree = service_tree;
1347 p = &service_tree->rb.rb_node;
1348 while (*p) {
1349 struct rb_node **n;
1350
1351 parent = *p;
1352 __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1353
1354 /*
1355 * sort by key, which represents service time.
1356 */
1357 if (time_before(rb_key, __cfqq->rb_key))
1358 n = &(*p)->rb_left;
1359 else {
1360 n = &(*p)->rb_right;
1361 left = 0;
1362 }
1363
1364 p = n;
1365 }
1366
1367 if (left)
1368 service_tree->left = &cfqq->rb_node;
1369
1370 cfqq->rb_key = rb_key;
1371 rb_link_node(&cfqq->rb_node, parent, p);
1372 rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1373 service_tree->count++;
1374 if (add_front || !new_cfqq)
1375 return;
1376 cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
1377}
1378
1379static struct cfq_queue *
1380cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1381 sector_t sector, struct rb_node **ret_parent,
1382 struct rb_node ***rb_link)
1383{
1384 struct rb_node **p, *parent;
1385 struct cfq_queue *cfqq = NULL;
1386
1387 parent = NULL;
1388 p = &root->rb_node;
1389 while (*p) {
1390 struct rb_node **n;
1391
1392 parent = *p;
1393 cfqq = rb_entry(parent, struct cfq_queue, p_node);
1394
1395 /*
1396 * Sort strictly based on sector. Smallest to the left,
1397 * largest to the right.
1398 */
1399 if (sector > blk_rq_pos(cfqq->next_rq))
1400 n = &(*p)->rb_right;
1401 else if (sector < blk_rq_pos(cfqq->next_rq))
1402 n = &(*p)->rb_left;
1403 else
1404 break;
1405 p = n;
1406 cfqq = NULL;
1407 }
1408
1409 *ret_parent = parent;
1410 if (rb_link)
1411 *rb_link = p;
1412 return cfqq;
1413}
1414
1415static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1416{
1417 struct rb_node **p, *parent;
1418 struct cfq_queue *__cfqq;
1419
1420 if (cfqq->p_root) {
1421 rb_erase(&cfqq->p_node, cfqq->p_root);
1422 cfqq->p_root = NULL;
1423 }
1424
1425 if (cfq_class_idle(cfqq))
1426 return;
1427 if (!cfqq->next_rq)
1428 return;
1429
1430 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1431 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1432 blk_rq_pos(cfqq->next_rq), &parent, &p);
1433 if (!__cfqq) {
1434 rb_link_node(&cfqq->p_node, parent, p);
1435 rb_insert_color(&cfqq->p_node, cfqq->p_root);
1436 } else
1437 cfqq->p_root = NULL;
1438}
1439
1440/*
1441 * Update cfqq's position in the service tree.
1442 */
1443static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1444{
1445 /*
1446 * Resorting requires the cfqq to be on the RR list already.
1447 */
1448 if (cfq_cfqq_on_rr(cfqq)) {
1449 cfq_service_tree_add(cfqd, cfqq, 0);
1450 cfq_prio_tree_add(cfqd, cfqq);
1451 }
1452}
1453
1454/*
1455 * add to busy list of queues for service, trying to be fair in ordering
1456 * the pending list according to last request service
1457 */
1458static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1459{
1460 cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1461 BUG_ON(cfq_cfqq_on_rr(cfqq));
1462 cfq_mark_cfqq_on_rr(cfqq);
1463 cfqd->busy_queues++;
1464 if (cfq_cfqq_sync(cfqq))
1465 cfqd->busy_sync_queues++;
1466
1467 cfq_resort_rr_list(cfqd, cfqq);
1468}
1469
1470/*
1471 * Called when the cfqq no longer has requests pending, remove it from
1472 * the service tree.
1473 */
1474static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1475{
1476 cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1477 BUG_ON(!cfq_cfqq_on_rr(cfqq));
1478 cfq_clear_cfqq_on_rr(cfqq);
1479
1480 if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1481 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1482 cfqq->service_tree = NULL;
1483 }
1484 if (cfqq->p_root) {
1485 rb_erase(&cfqq->p_node, cfqq->p_root);
1486 cfqq->p_root = NULL;
1487 }
1488
1489 cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
1490 BUG_ON(!cfqd->busy_queues);
1491 cfqd->busy_queues--;
1492 if (cfq_cfqq_sync(cfqq))
1493 cfqd->busy_sync_queues--;
1494}
1495
1496/*
1497 * rb tree support functions
1498 */
1499static void cfq_del_rq_rb(struct request *rq)
1500{
1501 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1502 const int sync = rq_is_sync(rq);
1503
1504 BUG_ON(!cfqq->queued[sync]);
1505 cfqq->queued[sync]--;
1506
1507 elv_rb_del(&cfqq->sort_list, rq);
1508
1509 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1510 /*
1511 * Queue will be deleted from service tree when we actually
1512 * expire it later. Right now just remove it from prio tree
1513 * as it is empty.
1514 */
1515 if (cfqq->p_root) {
1516 rb_erase(&cfqq->p_node, cfqq->p_root);
1517 cfqq->p_root = NULL;
1518 }
1519 }
1520}
1521
1522static void cfq_add_rq_rb(struct request *rq)
1523{
1524 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1525 struct cfq_data *cfqd = cfqq->cfqd;
1526 struct request *prev;
1527
1528 cfqq->queued[rq_is_sync(rq)]++;
1529
1530 elv_rb_add(&cfqq->sort_list, rq);
1531
1532 if (!cfq_cfqq_on_rr(cfqq))
1533 cfq_add_cfqq_rr(cfqd, cfqq);
1534
1535 /*
1536 * check if this request is a better next-serve candidate
1537 */
1538 prev = cfqq->next_rq;
1539 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1540
1541 /*
1542 * adjust priority tree position, if ->next_rq changes
1543 */
1544 if (prev != cfqq->next_rq)
1545 cfq_prio_tree_add(cfqd, cfqq);
1546
1547 BUG_ON(!cfqq->next_rq);
1548}
1549
1550static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1551{
1552 elv_rb_del(&cfqq->sort_list, rq);
1553 cfqq->queued[rq_is_sync(rq)]--;
1554 cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1555 rq_data_dir(rq), rq_is_sync(rq));
1556 cfq_add_rq_rb(rq);
1557 cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1558 &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1559 rq_is_sync(rq));
1560}
1561
1562static struct request *
1563cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1564{
1565 struct task_struct *tsk = current;
1566 struct cfq_io_cq *cic;
1567 struct cfq_queue *cfqq;
1568
1569 cic = cfq_cic_lookup(cfqd, tsk->io_context);
1570 if (!cic)
1571 return NULL;
1572
1573 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1574 if (cfqq) {
1575 sector_t sector = bio->bi_sector + bio_sectors(bio);
1576
1577 return elv_rb_find(&cfqq->sort_list, sector);
1578 }
1579
1580 return NULL;
1581}
1582
1583static void cfq_activate_request(struct request_queue *q, struct request *rq)
1584{
1585 struct cfq_data *cfqd = q->elevator->elevator_data;
1586
1587 cfqd->rq_in_driver++;
1588 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1589 cfqd->rq_in_driver);
1590
1591 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1592}
1593
1594static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1595{
1596 struct cfq_data *cfqd = q->elevator->elevator_data;
1597
1598 WARN_ON(!cfqd->rq_in_driver);
1599 cfqd->rq_in_driver--;
1600 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1601 cfqd->rq_in_driver);
1602}
1603
1604static void cfq_remove_request(struct request *rq)
1605{
1606 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1607
1608 if (cfqq->next_rq == rq)
1609 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1610
1611 list_del_init(&rq->queuelist);
1612 cfq_del_rq_rb(rq);
1613
1614 cfqq->cfqd->rq_queued--;
1615 cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1616 rq_data_dir(rq), rq_is_sync(rq));
1617 if (rq->cmd_flags & REQ_PRIO) {
1618 WARN_ON(!cfqq->prio_pending);
1619 cfqq->prio_pending--;
1620 }
1621}
1622
1623static int cfq_merge(struct request_queue *q, struct request **req,
1624 struct bio *bio)
1625{
1626 struct cfq_data *cfqd = q->elevator->elevator_data;
1627 struct request *__rq;
1628
1629 __rq = cfq_find_rq_fmerge(cfqd, bio);
1630 if (__rq && elv_rq_merge_ok(__rq, bio)) {
1631 *req = __rq;
1632 return ELEVATOR_FRONT_MERGE;
1633 }
1634
1635 return ELEVATOR_NO_MERGE;
1636}
1637
1638static void cfq_merged_request(struct request_queue *q, struct request *req,
1639 int type)
1640{
1641 if (type == ELEVATOR_FRONT_MERGE) {
1642 struct cfq_queue *cfqq = RQ_CFQQ(req);
1643
1644 cfq_reposition_rq_rb(cfqq, req);
1645 }
1646}
1647
1648static void cfq_bio_merged(struct request_queue *q, struct request *req,
1649 struct bio *bio)
1650{
1651 cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
1652 bio_data_dir(bio), cfq_bio_sync(bio));
1653}
1654
1655static void
1656cfq_merged_requests(struct request_queue *q, struct request *rq,
1657 struct request *next)
1658{
1659 struct cfq_queue *cfqq = RQ_CFQQ(rq);
1660 /*
1661 * reposition in fifo if next is older than rq
1662 */
1663 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1664 time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1665 list_move(&rq->queuelist, &next->queuelist);
1666 rq_set_fifo_time(rq, rq_fifo_time(next));
1667 }
1668
1669 if (cfqq->next_rq == next)
1670 cfqq->next_rq = rq;
1671 cfq_remove_request(next);
1672 cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
1673 rq_data_dir(next), rq_is_sync(next));
1674}
1675
1676static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1677 struct bio *bio)
1678{
1679 struct cfq_data *cfqd = q->elevator->elevator_data;
1680 struct cfq_io_cq *cic;
1681 struct cfq_queue *cfqq;
1682
1683 /*
1684 * Disallow merge of a sync bio into an async request.
1685 */
1686 if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1687 return false;
1688
1689 /*
1690 * Lookup the cfqq that this bio will be queued with and allow
1691 * merge only if rq is queued there. This function can be called
1692 * from plug merge without queue_lock. In such cases, ioc of @rq
1693 * and %current are guaranteed to be equal. Avoid lookup which
1694 * requires queue_lock by using @rq's cic.
1695 */
1696 if (current->io_context == RQ_CIC(rq)->icq.ioc) {
1697 cic = RQ_CIC(rq);
1698 } else {
1699 cic = cfq_cic_lookup(cfqd, current->io_context);
1700 if (!cic)
1701 return false;
1702 }
1703
1704 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1705 return cfqq == RQ_CFQQ(rq);
1706}
1707
1708static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1709{
1710 del_timer(&cfqd->idle_slice_timer);
1711 cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1712}
1713
1714static void __cfq_set_active_queue(struct cfq_data *cfqd,
1715 struct cfq_queue *cfqq)
1716{
1717 if (cfqq) {
1718 cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1719 cfqd->serving_prio, cfqd->serving_type);
1720 cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1721 cfqq->slice_start = 0;
1722 cfqq->dispatch_start = jiffies;
1723 cfqq->allocated_slice = 0;
1724 cfqq->slice_end = 0;
1725 cfqq->slice_dispatch = 0;
1726 cfqq->nr_sectors = 0;
1727
1728 cfq_clear_cfqq_wait_request(cfqq);
1729 cfq_clear_cfqq_must_dispatch(cfqq);
1730 cfq_clear_cfqq_must_alloc_slice(cfqq);
1731 cfq_clear_cfqq_fifo_expire(cfqq);
1732 cfq_mark_cfqq_slice_new(cfqq);
1733
1734 cfq_del_timer(cfqd, cfqq);
1735 }
1736
1737 cfqd->active_queue = cfqq;
1738}
1739
1740/*
1741 * current cfqq expired its slice (or was too idle), select new one
1742 */
1743static void
1744__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1745 bool timed_out)
1746{
1747 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1748
1749 if (cfq_cfqq_wait_request(cfqq))
1750 cfq_del_timer(cfqd, cfqq);
1751
1752 cfq_clear_cfqq_wait_request(cfqq);
1753 cfq_clear_cfqq_wait_busy(cfqq);
1754
1755 /*
1756 * If this cfqq is shared between multiple processes, check to
1757 * make sure that those processes are still issuing I/Os within
1758 * the mean seek distance. If not, it may be time to break the
1759 * queues apart again.
1760 */
1761 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1762 cfq_mark_cfqq_split_coop(cfqq);
1763
1764 /*
1765 * store what was left of this slice, if the queue idled/timed out
1766 */
1767 if (timed_out) {
1768 if (cfq_cfqq_slice_new(cfqq))
1769 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
1770 else
1771 cfqq->slice_resid = cfqq->slice_end - jiffies;
1772 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1773 }
1774
1775 cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1776
1777 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1778 cfq_del_cfqq_rr(cfqd, cfqq);
1779
1780 cfq_resort_rr_list(cfqd, cfqq);
1781
1782 if (cfqq == cfqd->active_queue)
1783 cfqd->active_queue = NULL;
1784
1785 if (cfqd->active_cic) {
1786 put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
1787 cfqd->active_cic = NULL;
1788 }
1789}
1790
1791static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1792{
1793 struct cfq_queue *cfqq = cfqd->active_queue;
1794
1795 if (cfqq)
1796 __cfq_slice_expired(cfqd, cfqq, timed_out);
1797}
1798
1799/*
1800 * Get next queue for service. Unless we have a queue preemption,
1801 * we'll simply select the first cfqq in the service tree.
1802 */
1803static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1804{
1805 struct cfq_rb_root *service_tree =
1806 service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1807 cfqd->serving_type);
1808
1809 if (!cfqd->rq_queued)
1810 return NULL;
1811
1812 /* There is nothing to dispatch */
1813 if (!service_tree)
1814 return NULL;
1815 if (RB_EMPTY_ROOT(&service_tree->rb))
1816 return NULL;
1817 return cfq_rb_first(service_tree);
1818}
1819
1820static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1821{
1822 struct cfq_group *cfqg;
1823 struct cfq_queue *cfqq;
1824 int i, j;
1825 struct cfq_rb_root *st;
1826
1827 if (!cfqd->rq_queued)
1828 return NULL;
1829
1830 cfqg = cfq_get_next_cfqg(cfqd);
1831 if (!cfqg)
1832 return NULL;
1833
1834 for_each_cfqg_st(cfqg, i, j, st)
1835 if ((cfqq = cfq_rb_first(st)) != NULL)
1836 return cfqq;
1837 return NULL;
1838}
1839
1840/*
1841 * Get and set a new active queue for service.
1842 */
1843static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1844 struct cfq_queue *cfqq)
1845{
1846 if (!cfqq)
1847 cfqq = cfq_get_next_queue(cfqd);
1848
1849 __cfq_set_active_queue(cfqd, cfqq);
1850 return cfqq;
1851}
1852
1853static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1854 struct request *rq)
1855{
1856 if (blk_rq_pos(rq) >= cfqd->last_position)
1857 return blk_rq_pos(rq) - cfqd->last_position;
1858 else
1859 return cfqd->last_position - blk_rq_pos(rq);
1860}
1861
1862static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1863 struct request *rq)
1864{
1865 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1866}
1867
1868static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1869 struct cfq_queue *cur_cfqq)
1870{
1871 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1872 struct rb_node *parent, *node;
1873 struct cfq_queue *__cfqq;
1874 sector_t sector = cfqd->last_position;
1875
1876 if (RB_EMPTY_ROOT(root))
1877 return NULL;
1878
1879 /*
1880 * First, if we find a request starting at the end of the last
1881 * request, choose it.
1882 */
1883 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1884 if (__cfqq)
1885 return __cfqq;
1886
1887 /*
1888 * If the exact sector wasn't found, the parent of the NULL leaf
1889 * will contain the closest sector.
1890 */
1891 __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1892 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1893 return __cfqq;
1894
1895 if (blk_rq_pos(__cfqq->next_rq) < sector)
1896 node = rb_next(&__cfqq->p_node);
1897 else
1898 node = rb_prev(&__cfqq->p_node);
1899 if (!node)
1900 return NULL;
1901
1902 __cfqq = rb_entry(node, struct cfq_queue, p_node);
1903 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1904 return __cfqq;
1905
1906 return NULL;
1907}
1908
1909/*
1910 * cfqd - obvious
1911 * cur_cfqq - passed in so that we don't decide that the current queue is
1912 * closely cooperating with itself.
1913 *
1914 * So, basically we're assuming that cur_cfqq has dispatched at least
1915 * one request, and that cfqd->last_position reflects a position on the disk
1916 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
1917 * assumption.
1918 */
1919static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1920 struct cfq_queue *cur_cfqq)
1921{
1922 struct cfq_queue *cfqq;
1923
1924 if (cfq_class_idle(cur_cfqq))
1925 return NULL;
1926 if (!cfq_cfqq_sync(cur_cfqq))
1927 return NULL;
1928 if (CFQQ_SEEKY(cur_cfqq))
1929 return NULL;
1930
1931 /*
1932 * Don't search priority tree if it's the only queue in the group.
1933 */
1934 if (cur_cfqq->cfqg->nr_cfqq == 1)
1935 return NULL;
1936
1937 /*
1938 * We should notice if some of the queues are cooperating, e.g.
1939 * working closely on the same area of the disk. In that case,
1940 * we can group them together and not waste time idling.
1941 */
1942 cfqq = cfqq_close(cfqd, cur_cfqq);
1943 if (!cfqq)
1944 return NULL;
1945
1946 /* If new queue belongs to different cfq_group, don't choose it */
1947 if (cur_cfqq->cfqg != cfqq->cfqg)
1948 return NULL;
1949
1950 /*
1951 * It only makes sense to merge sync queues.
1952 */
1953 if (!cfq_cfqq_sync(cfqq))
1954 return NULL;
1955 if (CFQQ_SEEKY(cfqq))
1956 return NULL;
1957
1958 /*
1959 * Do not merge queues of different priority classes
1960 */
1961 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1962 return NULL;
1963
1964 return cfqq;
1965}
1966
1967/*
1968 * Determine whether we should enforce idle window for this queue.
1969 */
1970
1971static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1972{
1973 enum wl_prio_t prio = cfqq_prio(cfqq);
1974 struct cfq_rb_root *service_tree = cfqq->service_tree;
1975
1976 BUG_ON(!service_tree);
1977 BUG_ON(!service_tree->count);
1978
1979 if (!cfqd->cfq_slice_idle)
1980 return false;
1981
1982 /* We never do for idle class queues. */
1983 if (prio == IDLE_WORKLOAD)
1984 return false;
1985
1986 /* We do for queues that were marked with idle window flag. */
1987 if (cfq_cfqq_idle_window(cfqq) &&
1988 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1989 return true;
1990
1991 /*
1992 * Otherwise, we do only if they are the last ones
1993 * in their service tree.
1994 */
1995 if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
1996 !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
1997 return true;
1998 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1999 service_tree->count);
2000 return false;
2001}
2002
2003static void cfq_arm_slice_timer(struct cfq_data *cfqd)
2004{
2005 struct cfq_queue *cfqq = cfqd->active_queue;
2006 struct cfq_io_cq *cic;
2007 unsigned long sl, group_idle = 0;
2008
2009 /*
2010	 * On an SSD without a seek penalty, disable idling. But only do so
2011	 * for devices that support queuing; otherwise we still have a problem
2012 * with sync vs async workloads.
2013 */
2014 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
2015 return;
2016
2017 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
2018 WARN_ON(cfq_cfqq_slice_new(cfqq));
2019
2020 /*
2021 * idle is disabled, either manually or by past process history
2022 */
2023 if (!cfq_should_idle(cfqd, cfqq)) {
2024 /* no queue idling. Check for group idling */
2025 if (cfqd->cfq_group_idle)
2026 group_idle = cfqd->cfq_group_idle;
2027 else
2028 return;
2029 }
2030
2031 /*
2032 * still active requests from this queue, don't idle
2033 */
2034 if (cfqq->dispatched)
2035 return;
2036
2037 /*
2038 * task has exited, don't wait
2039 */
2040 cic = cfqd->active_cic;
2041 if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
2042 return;
2043
2044 /*
2045 * If our average think time is larger than the remaining time
2046 * slice, then don't idle. This avoids overrunning the allotted
2047 * time slice.
2048 */
2049 if (sample_valid(cic->ttime.ttime_samples) &&
2050 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
2051 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
2052 cic->ttime.ttime_mean);
2053 return;
2054 }
2055
2056 /* There are other queues in the group, don't do group idle */
2057 if (group_idle && cfqq->cfqg->nr_cfqq > 1)
2058 return;
2059
2060 cfq_mark_cfqq_wait_request(cfqq);
2061
2062 if (group_idle)
2063 sl = cfqd->cfq_group_idle;
2064 else
2065 sl = cfqd->cfq_slice_idle;
2066
2067 mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
2068 cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
2069 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
2070 group_idle ? 1 : 0);
2071}
2072
2073/*
2074 * Move request from internal lists to the request queue dispatch list.
2075 */
2076static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
2077{
2078 struct cfq_data *cfqd = q->elevator->elevator_data;
2079 struct cfq_queue *cfqq = RQ_CFQQ(rq);
2080
2081 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
2082
2083 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
2084 cfq_remove_request(rq);
2085 cfqq->dispatched++;
2086 (RQ_CFQG(rq))->dispatched++;
2087 elv_dispatch_sort(q, rq);
2088
2089 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
2090 cfqq->nr_sectors += blk_rq_sectors(rq);
2091 cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
2092 rq_data_dir(rq), rq_is_sync(rq));
2093}
2094
2095/*
2096 * return expired entry, or NULL to just start from scratch in rbtree
2097 */
2098static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2099{
2100 struct request *rq = NULL;
2101
2102 if (cfq_cfqq_fifo_expire(cfqq))
2103 return NULL;
2104
2105 cfq_mark_cfqq_fifo_expire(cfqq);
2106
2107 if (list_empty(&cfqq->fifo))
2108 return NULL;
2109
2110 rq = rq_entry_fifo(cfqq->fifo.next);
2111 if (time_before(jiffies, rq_fifo_time(rq)))
2112 rq = NULL;
2113
2114 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2115 return rq;
2116}
2117
2118static inline int
2119cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2120{
2121 const int base_rq = cfqd->cfq_slice_async_rq;
2122
2123 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2124
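	/*
	 * For example, with the default cfq_slice_async_rq of 2 and the
	 * default best-effort priority of 4, this allows
	 * 2 * 2 * (8 - 4) = 16 requests per dispatch round.
	 */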
2125 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
2126}
2127
2128/*
2129 * Must be called with the queue_lock held.
2130 */
2131static int cfqq_process_refs(struct cfq_queue *cfqq)
2132{
2133 int process_refs, io_refs;
2134
2135 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2136 process_refs = cfqq->ref - io_refs;
2137 BUG_ON(process_refs < 0);
2138 return process_refs;
2139}
2140
2141static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2142{
2143 int process_refs, new_process_refs;
2144 struct cfq_queue *__cfqq;
2145
2146 /*
2147 * If there are no process references on the new_cfqq, then it is
2148 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2149 * chain may have dropped their last reference (not just their
2150 * last process reference).
2151 */
2152 if (!cfqq_process_refs(new_cfqq))
2153 return;
2154
2155 /* Avoid a circular list and skip interim queue merges */
2156 while ((__cfqq = new_cfqq->new_cfqq)) {
2157 if (__cfqq == cfqq)
2158 return;
2159 new_cfqq = __cfqq;
2160 }
2161
2162 process_refs = cfqq_process_refs(cfqq);
2163 new_process_refs = cfqq_process_refs(new_cfqq);
2164 /*
2165 * If the process for the cfqq has gone away, there is no
2166 * sense in merging the queues.
2167 */
2168 if (process_refs == 0 || new_process_refs == 0)
2169 return;
2170
2171 /*
2172 * Merge in the direction of the lesser amount of work.
2173 */
2174 if (new_process_refs >= process_refs) {
2175 cfqq->new_cfqq = new_cfqq;
2176 new_cfqq->ref += process_refs;
2177 } else {
2178 new_cfqq->new_cfqq = cfqq;
2179 cfqq->ref += new_process_refs;
2180 }
2181}
2182
2183static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2184 struct cfq_group *cfqg, enum wl_prio_t prio)
2185{
2186 struct cfq_queue *queue;
2187 int i;
2188 bool key_valid = false;
2189 unsigned long lowest_key = 0;
2190 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2191
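	/*
	 * Scan the async, sync-noidle and sync service trees of this group
	 * at the given priority and pick the workload type whose head queue
	 * has the smallest rb_key, i.e. the one that has been waiting for
	 * service the longest.
	 */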
2192 for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2193 /* select the one with lowest rb_key */
2194 queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2195 if (queue &&
2196 (!key_valid || time_before(queue->rb_key, lowest_key))) {
2197 lowest_key = queue->rb_key;
2198 cur_best = i;
2199 key_valid = true;
2200 }
2201 }
2202
2203 return cur_best;
2204}
2205
2206static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2207{
2208 unsigned slice;
2209 unsigned count;
2210 struct cfq_rb_root *st;
2211 unsigned group_slice;
2212 enum wl_prio_t original_prio = cfqd->serving_prio;
2213
2214 /* Choose next priority. RT > BE > IDLE */
2215 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2216 cfqd->serving_prio = RT_WORKLOAD;
2217 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2218 cfqd->serving_prio = BE_WORKLOAD;
2219 else {
2220 cfqd->serving_prio = IDLE_WORKLOAD;
2221 cfqd->workload_expires = jiffies + 1;
2222 return;
2223 }
2224
2225 if (original_prio != cfqd->serving_prio)
2226 goto new_workload;
2227
2228 /*
2229 * For RT and BE, we have to choose also the type
2230 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2231 * expiration time
2232 */
2233 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2234 count = st->count;
2235
2236 /*
2237 * check workload expiration, and that we still have other queues ready
2238 */
2239 if (count && !time_after(jiffies, cfqd->workload_expires))
2240 return;
2241
2242new_workload:
2243 /* otherwise select new workload type */
2244 cfqd->serving_type =
2245 cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2246 st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2247 count = st->count;
2248
2249 /*
2250 * the workload slice is computed as a fraction of target latency
2251 * proportional to the number of queues in that workload, over
2252 * all the queues in the same priority class
2253 */
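	/*
	 * For example, if the group slice works out to the default 300ms
	 * target latency and this workload type holds 2 of the 4 queues
	 * busy in this priority class, the workload is given roughly 150ms
	 * before we switch to another type.
	 */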
2254 group_slice = cfq_group_slice(cfqd, cfqg);
2255
2256 slice = group_slice * count /
2257 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2258 cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2259
2260 if (cfqd->serving_type == ASYNC_WORKLOAD) {
2261 unsigned int tmp;
2262
2263 /*
2264 * Async queues are currently system wide. Just taking
2265		 * proportion of queues within the same group will lead to a higher
2266 * async ratio system wide as generally root group is going
2267 * to have higher weight. A more accurate thing would be to
2268		 * calculate the system wide async/sync ratio.
2269 */
2270 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2271 tmp = tmp/cfqd->busy_queues;
2272 slice = min_t(unsigned, slice, tmp);
2273
2274 /* async workload slice is scaled down according to
2275 * the sync/async slice ratio. */
2276 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
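		/*
		 * With the default slice_async (40ms) and slice_sync (100ms)
		 * values, this scales the async workload slice down to 40%
		 * of the figure computed above.
		 */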
2277 } else
2278 /* sync workload slice is at least 2 * cfq_slice_idle */
2279 slice = max(slice, 2 * cfqd->cfq_slice_idle);
2280
2281 slice = max_t(unsigned, slice, CFQ_MIN_TT);
2282 cfq_log(cfqd, "workload slice:%d", slice);
2283 cfqd->workload_expires = jiffies + slice;
2284}
2285
2286static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2287{
2288 struct cfq_rb_root *st = &cfqd->grp_service_tree;
2289 struct cfq_group *cfqg;
2290
2291 if (RB_EMPTY_ROOT(&st->rb))
2292 return NULL;
2293 cfqg = cfq_rb_first_group(st);
2294 update_min_vdisktime(st);
2295 return cfqg;
2296}
2297
2298static void cfq_choose_cfqg(struct cfq_data *cfqd)
2299{
2300 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2301
2302 cfqd->serving_group = cfqg;
2303
2304 /* Restore the workload type data */
2305 if (cfqg->saved_workload_slice) {
2306 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2307 cfqd->serving_type = cfqg->saved_workload;
2308 cfqd->serving_prio = cfqg->saved_serving_prio;
2309 } else
2310 cfqd->workload_expires = jiffies - 1;
2311
2312 choose_service_tree(cfqd, cfqg);
2313}
2314
2315/*
2316 * Select a queue for service. If we have a current active queue,
2317 * check whether to continue servicing it, or retrieve and set a new one.
2318 */
2319static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2320{
2321 struct cfq_queue *cfqq, *new_cfqq = NULL;
2322
2323 cfqq = cfqd->active_queue;
2324 if (!cfqq)
2325 goto new_queue;
2326
2327 if (!cfqd->rq_queued)
2328 return NULL;
2329
2330 /*
2331 * We were waiting for group to get backlogged. Expire the queue
2332 */
2333 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2334 goto expire;
2335
2336 /*
2337 * The active queue has run out of time, expire it and select new.
2338 */
2339 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2340 /*
2341 * If slice had not expired at the completion of last request
2342 * we might not have turned on wait_busy flag. Don't expire
2343 * the queue yet. Allow the group to get backlogged.
2344 *
2345		 * The very fact that we have used the slice means that we
2346 * have been idling all along on this queue and it should be
2347 * ok to wait for this request to complete.
2348 */
2349 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2350 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2351 cfqq = NULL;
2352 goto keep_queue;
2353 } else
2354 goto check_group_idle;
2355 }
2356
2357 /*
2358 * The active queue has requests and isn't expired, allow it to
2359 * dispatch.
2360 */
2361 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2362 goto keep_queue;
2363
2364 /*
2365 * If another queue has a request waiting within our mean seek
2366 * distance, let it run. The expire code will check for close
2367 * cooperators and put the close queue at the front of the service
2368 * tree. If possible, merge the expiring queue with the new cfqq.
2369 */
2370 new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2371 if (new_cfqq) {
2372 if (!cfqq->new_cfqq)
2373 cfq_setup_merge(cfqq, new_cfqq);
2374 goto expire;
2375 }
2376
2377 /*
2378 * No requests pending. If the active queue still has requests in
2379 * flight or is idling for a new request, allow either of these
2380 * conditions to happen (or time out) before selecting a new queue.
2381 */
2382 if (timer_pending(&cfqd->idle_slice_timer)) {
2383 cfqq = NULL;
2384 goto keep_queue;
2385 }
2386
2387 /*
2388 * This is a deep seek queue, but the device is much faster than
2389 * the queue can deliver, don't idle
2390	 */
2391 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
2392 (cfq_cfqq_slice_new(cfqq) ||
2393 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
2394 cfq_clear_cfqq_deep(cfqq);
2395 cfq_clear_cfqq_idle_window(cfqq);
2396 }
2397
2398 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2399 cfqq = NULL;
2400 goto keep_queue;
2401 }
2402
2403 /*
2404 * If group idle is enabled and there are requests dispatched from
2405 * this group, wait for requests to complete.
2406 */
2407check_group_idle:
2408 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
2409 cfqq->cfqg->dispatched &&
2410 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
2411 cfqq = NULL;
2412 goto keep_queue;
2413 }
2414
2415expire:
2416 cfq_slice_expired(cfqd, 0);
2417new_queue:
2418 /*
2419 * Current queue expired. Check if we have to switch to a new
2420 * service tree
2421 */
2422 if (!new_cfqq)
2423 cfq_choose_cfqg(cfqd);
2424
2425 cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2426keep_queue:
2427 return cfqq;
2428}
2429
2430static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2431{
2432 int dispatched = 0;
2433
2434 while (cfqq->next_rq) {
2435 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2436 dispatched++;
2437 }
2438
2439 BUG_ON(!list_empty(&cfqq->fifo));
2440
2441 /* By default cfqq is not expired if it is empty. Do it explicitly */
2442 __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2443 return dispatched;
2444}
2445
2446/*
2447 * Drain our current requests. Used for barriers and when switching
2448 * io schedulers on-the-fly.
2449 */
2450static int cfq_forced_dispatch(struct cfq_data *cfqd)
2451{
2452 struct cfq_queue *cfqq;
2453 int dispatched = 0;
2454
2455 /* Expire the timeslice of the current active queue first */
2456 cfq_slice_expired(cfqd, 0);
2457 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2458 __cfq_set_active_queue(cfqd, cfqq);
2459 dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2460 }
2461
2462 BUG_ON(cfqd->busy_queues);
2463
2464 cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2465 return dispatched;
2466}
2467
2468static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2469 struct cfq_queue *cfqq)
2470{
2471 /* the queue hasn't finished any request, can't estimate */
2472 if (cfq_cfqq_slice_new(cfqq))
2473 return true;
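	/*
	 * Rough estimate: assume each already-dispatched request will take
	 * about one slice_idle period to complete; if that pushes us past
	 * slice_end, treat the slice as already used.
	 */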
2474 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2475 cfqq->slice_end))
2476 return true;
2477
2478 return false;
2479}
2480
2481static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2482{
2483 unsigned int max_dispatch;
2484
2485 /*
2486 * Drain async requests before we start sync IO
2487 */
2488 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2489 return false;
2490
2491 /*
2492 * If this is an async queue and we have sync IO in flight, let it wait
2493 */
2494 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2495 return false;
2496
2497 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2498 if (cfq_class_idle(cfqq))
2499 max_dispatch = 1;
2500
2501 /*
2502 * Does this cfqq already have too much IO in flight?
2503 */
2504 if (cfqq->dispatched >= max_dispatch) {
2505 bool promote_sync = false;
2506 /*
2507 * idle queue must always only have a single IO in flight
2508 */
2509 if (cfq_class_idle(cfqq))
2510 return false;
2511
2512 /*
2513 * If there is only one sync queue
2514		 * we can ignore the async queue here and give the sync
2515		 * queue no dispatch limit. The reason is that a sync queue can
2516		 * preempt an async queue, so limiting the sync queue doesn't make
2517		 * sense. This is useful for the aiostress test.
2518 */
2519 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
2520 promote_sync = true;
2521
2522 /*
2523 * We have other queues, don't allow more IO from this one
2524 */
2525 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
2526 !promote_sync)
2527 return false;
2528
2529 /*
2530 * Sole queue user, no limit
2531 */
2532 if (cfqd->busy_queues == 1 || promote_sync)
2533 max_dispatch = -1;
2534 else
2535 /*
2536 * Normally we start throttling cfqq when cfq_quantum/2
2537 * requests have been dispatched. But we can drive
2538			 * deeper queue depths at the beginning of the slice,
2539			 * subject to the upper limit of cfq_quantum.
2540			 */
2541 max_dispatch = cfqd->cfq_quantum;
2542 }
2543
2544 /*
2545 * Async queues must wait a bit before being allowed dispatch.
2546 * We also ramp up the dispatch depth gradually for async IO,
2547 * based on the last sync IO we serviced
2548 */
2549 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2550 unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2551 unsigned int depth;
2552
2553 depth = last_sync / cfqd->cfq_slice[1];
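		/*
		 * For example, with the default 100ms sync slice, an async
		 * queue only reaches a depth of 2 once 200ms have passed
		 * since the last delayed sync completion.
		 */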
2554 if (!depth && !cfqq->dispatched)
2555 depth = 1;
2556 if (depth < max_dispatch)
2557 max_dispatch = depth;
2558 }
2559
2560 /*
2561 * If we're below the current max, allow a dispatch
2562 */
2563 return cfqq->dispatched < max_dispatch;
2564}
2565
2566/*
2567 * Dispatch a request from cfqq, moving it to the request queue
2568 * dispatch list.
2569 */
2570static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2571{
2572 struct request *rq;
2573
2574 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2575
2576 if (!cfq_may_dispatch(cfqd, cfqq))
2577 return false;
2578
2579 /*
2580 * follow expired path, else get first next available
2581 */
2582 rq = cfq_check_fifo(cfqq);
2583 if (!rq)
2584 rq = cfqq->next_rq;
2585
2586 /*
2587 * insert request into driver dispatch list
2588 */
2589 cfq_dispatch_insert(cfqd->queue, rq);
2590
2591 if (!cfqd->active_cic) {
2592 struct cfq_io_cq *cic = RQ_CIC(rq);
2593
2594 atomic_long_inc(&cic->icq.ioc->refcount);
2595 cfqd->active_cic = cic;
2596 }
2597
2598 return true;
2599}
2600
2601/*
2602 * Find the cfqq that we need to service and move a request from that to the
2603 * dispatch list
2604 */
2605static int cfq_dispatch_requests(struct request_queue *q, int force)
2606{
2607 struct cfq_data *cfqd = q->elevator->elevator_data;
2608 struct cfq_queue *cfqq;
2609
2610 if (!cfqd->busy_queues)
2611 return 0;
2612
2613 if (unlikely(force))
2614 return cfq_forced_dispatch(cfqd);
2615
2616 cfqq = cfq_select_queue(cfqd);
2617 if (!cfqq)
2618 return 0;
2619
2620 /*
2621 * Dispatch a request from this cfqq, if it is allowed
2622 */
2623 if (!cfq_dispatch_request(cfqd, cfqq))
2624 return 0;
2625
2626 cfqq->slice_dispatch++;
2627 cfq_clear_cfqq_must_dispatch(cfqq);
2628
2629 /*
2630	 * Expire an async queue immediately if it has used up its slice. An idle
2631	 * queue always expires after 1 dispatch round.
2632 */
2633 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2634 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2635 cfq_class_idle(cfqq))) {
2636 cfqq->slice_end = jiffies + 1;
2637 cfq_slice_expired(cfqd, 0);
2638 }
2639
2640 cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2641 return 1;
2642}
2643
2644/*
2645 * task holds one reference to the queue, dropped when task exits. each rq
2646 * in-flight on this queue also holds a reference, dropped when rq is freed.
2647 *
2648 * Each cfq queue took a reference on the parent group. Drop it now.
2649 * queue lock must be held here.
2650 */
2651static void cfq_put_queue(struct cfq_queue *cfqq)
2652{
2653 struct cfq_data *cfqd = cfqq->cfqd;
2654 struct cfq_group *cfqg;
2655
2656 BUG_ON(cfqq->ref <= 0);
2657
2658 cfqq->ref--;
2659 if (cfqq->ref)
2660 return;
2661
2662 cfq_log_cfqq(cfqd, cfqq, "put_queue");
2663 BUG_ON(rb_first(&cfqq->sort_list));
2664 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2665 cfqg = cfqq->cfqg;
2666
2667 if (unlikely(cfqd->active_queue == cfqq)) {
2668 __cfq_slice_expired(cfqd, cfqq, 0);
2669 cfq_schedule_dispatch(cfqd);
2670 }
2671
2672 BUG_ON(cfq_cfqq_on_rr(cfqq));
2673 kmem_cache_free(cfq_pool, cfqq);
2674 cfq_put_cfqg(cfqg);
2675}
2676
2677static void cfq_put_cooperator(struct cfq_queue *cfqq)
2678{
2679 struct cfq_queue *__cfqq, *next;
2680
2681 /*
2682 * If this queue was scheduled to merge with another queue, be
2683 * sure to drop the reference taken on that queue (and others in
2684 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
2685 */
2686 __cfqq = cfqq->new_cfqq;
2687 while (__cfqq) {
2688 if (__cfqq == cfqq) {
2689 WARN(1, "cfqq->new_cfqq loop detected\n");
2690 break;
2691 }
2692 next = __cfqq->new_cfqq;
2693 cfq_put_queue(__cfqq);
2694 __cfqq = next;
2695 }
2696}
2697
2698static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2699{
2700 if (unlikely(cfqq == cfqd->active_queue)) {
2701 __cfq_slice_expired(cfqd, cfqq, 0);
2702 cfq_schedule_dispatch(cfqd);
2703 }
2704
2705 cfq_put_cooperator(cfqq);
2706
2707 cfq_put_queue(cfqq);
2708}
2709
2710static void cfq_exit_icq(struct io_cq *icq)
2711{
2712 struct cfq_io_cq *cic = icq_to_cic(icq);
2713 struct cfq_data *cfqd = cic_to_cfqd(cic);
2714
2715 if (cic->cfqq[BLK_RW_ASYNC]) {
2716 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2717 cic->cfqq[BLK_RW_ASYNC] = NULL;
2718 }
2719
2720 if (cic->cfqq[BLK_RW_SYNC]) {
2721 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2722 cic->cfqq[BLK_RW_SYNC] = NULL;
2723 }
2724}
2725
2726static struct cfq_io_cq *cfq_alloc_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
2727{
2728 struct cfq_io_cq *cic;
2729
2730 cic = kmem_cache_alloc_node(cfq_icq_pool, gfp_mask | __GFP_ZERO,
2731 cfqd->queue->node);
2732 if (cic) {
2733 cic->ttime.last_end_request = jiffies;
2734 INIT_LIST_HEAD(&cic->icq.q_node);
2735 INIT_HLIST_NODE(&cic->icq.ioc_node);
2736 }
2737
2738 return cic;
2739}
2740
2741static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2742{
2743 struct task_struct *tsk = current;
2744 int ioprio_class;
2745
2746 if (!cfq_cfqq_prio_changed(cfqq))
2747 return;
2748
2749 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2750 switch (ioprio_class) {
2751 default:
2752 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
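		/* fall through */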
2753 case IOPRIO_CLASS_NONE:
2754 /*
2755 * no prio set, inherit CPU scheduling settings
2756 */
2757 cfqq->ioprio = task_nice_ioprio(tsk);
2758 cfqq->ioprio_class = task_nice_ioclass(tsk);
2759 break;
2760 case IOPRIO_CLASS_RT:
2761 cfqq->ioprio = task_ioprio(ioc);
2762 cfqq->ioprio_class = IOPRIO_CLASS_RT;
2763 break;
2764 case IOPRIO_CLASS_BE:
2765 cfqq->ioprio = task_ioprio(ioc);
2766 cfqq->ioprio_class = IOPRIO_CLASS_BE;
2767 break;
2768 case IOPRIO_CLASS_IDLE:
2769 cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2770 cfqq->ioprio = 7;
2771 cfq_clear_cfqq_idle_window(cfqq);
2772 break;
2773 }
2774
2775 /*
2776 * keep track of original prio settings in case we have to temporarily
2777 * elevate the priority of this queue
2778 */
2779 cfqq->org_ioprio = cfqq->ioprio;
2780 cfq_clear_cfqq_prio_changed(cfqq);
2781}
2782
2783static void changed_ioprio(struct cfq_io_cq *cic)
2784{
2785 struct cfq_data *cfqd = cic_to_cfqd(cic);
2786 struct cfq_queue *cfqq;
2787
2788 if (unlikely(!cfqd))
2789 return;
2790
2791 cfqq = cic->cfqq[BLK_RW_ASYNC];
2792 if (cfqq) {
2793 struct cfq_queue *new_cfqq;
2794 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
2795 GFP_ATOMIC);
2796 if (new_cfqq) {
2797 cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2798 cfq_put_queue(cfqq);
2799 }
2800 }
2801
2802 cfqq = cic->cfqq[BLK_RW_SYNC];
2803 if (cfqq)
2804 cfq_mark_cfqq_prio_changed(cfqq);
2805}
2806
2807static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2808 pid_t pid, bool is_sync)
2809{
2810 RB_CLEAR_NODE(&cfqq->rb_node);
2811 RB_CLEAR_NODE(&cfqq->p_node);
2812 INIT_LIST_HEAD(&cfqq->fifo);
2813
2814 cfqq->ref = 0;
2815 cfqq->cfqd = cfqd;
2816
2817 cfq_mark_cfqq_prio_changed(cfqq);
2818
2819 if (is_sync) {
2820 if (!cfq_class_idle(cfqq))
2821 cfq_mark_cfqq_idle_window(cfqq);
2822 cfq_mark_cfqq_sync(cfqq);
2823 }
2824 cfqq->pid = pid;
2825}
2826
2827#ifdef CONFIG_CFQ_GROUP_IOSCHED
2828static void changed_cgroup(struct cfq_io_cq *cic)
2829{
2830 struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2831 struct cfq_data *cfqd = cic_to_cfqd(cic);
2832 struct request_queue *q;
2833
2834 if (unlikely(!cfqd))
2835 return;
2836
2837 q = cfqd->queue;
2838
2839 if (sync_cfqq) {
2840 /*
2841 * Drop reference to sync queue. A new sync queue will be
2842 * assigned in new group upon arrival of a fresh request.
2843 */
2844 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2845 cic_set_cfqq(cic, NULL, 1);
2846 cfq_put_queue(sync_cfqq);
2847 }
2848}
2849#endif /* CONFIG_CFQ_GROUP_IOSCHED */
2850
2851static struct cfq_queue *
2852cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2853 struct io_context *ioc, gfp_t gfp_mask)
2854{
2855 struct cfq_queue *cfqq, *new_cfqq = NULL;
2856 struct cfq_io_cq *cic;
2857 struct cfq_group *cfqg;
2858
2859retry:
2860 cfqg = cfq_get_cfqg(cfqd);
2861 cic = cfq_cic_lookup(cfqd, ioc);
2862 /* cic always exists here */
2863 cfqq = cic_to_cfqq(cic, is_sync);
2864
2865 /*
2866 * Always try a new alloc if we fell back to the OOM cfqq
2867 * originally, since it should just be a temporary situation.
2868 */
2869 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2870 cfqq = NULL;
2871 if (new_cfqq) {
2872 cfqq = new_cfqq;
2873 new_cfqq = NULL;
2874 } else if (gfp_mask & __GFP_WAIT) {
2875 spin_unlock_irq(cfqd->queue->queue_lock);
2876 new_cfqq = kmem_cache_alloc_node(cfq_pool,
2877 gfp_mask | __GFP_ZERO,
2878 cfqd->queue->node);
2879 spin_lock_irq(cfqd->queue->queue_lock);
2880 if (new_cfqq)
2881 goto retry;
2882 } else {
2883 cfqq = kmem_cache_alloc_node(cfq_pool,
2884 gfp_mask | __GFP_ZERO,
2885 cfqd->queue->node);
2886 }
2887
2888 if (cfqq) {
2889 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2890 cfq_init_prio_data(cfqq, ioc);
2891 cfq_link_cfqq_cfqg(cfqq, cfqg);
2892 cfq_log_cfqq(cfqd, cfqq, "alloced");
2893 } else
2894 cfqq = &cfqd->oom_cfqq;
2895 }
2896
2897 if (new_cfqq)
2898 kmem_cache_free(cfq_pool, new_cfqq);
2899
2900 return cfqq;
2901}
2902
2903static struct cfq_queue **
2904cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2905{
2906 switch (ioprio_class) {
2907 case IOPRIO_CLASS_RT:
2908 return &cfqd->async_cfqq[0][ioprio];
2909 case IOPRIO_CLASS_BE:
2910 return &cfqd->async_cfqq[1][ioprio];
2911 case IOPRIO_CLASS_IDLE:
2912 return &cfqd->async_idle_cfqq;
2913 default:
2914 BUG();
2915 }
2916}
2917
2918static struct cfq_queue *
2919cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2920 gfp_t gfp_mask)
2921{
2922 const int ioprio = task_ioprio(ioc);
2923 const int ioprio_class = task_ioprio_class(ioc);
2924 struct cfq_queue **async_cfqq = NULL;
2925 struct cfq_queue *cfqq = NULL;
2926
2927 if (!is_sync) {
2928 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2929 cfqq = *async_cfqq;
2930 }
2931
2932 if (!cfqq)
2933 cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2934
2935 /*
2936 * pin the queue now that it's allocated, scheduler exit will prune it
2937 */
2938 if (!is_sync && !(*async_cfqq)) {
2939 cfqq->ref++;
2940 *async_cfqq = cfqq;
2941 }
2942
2943 cfqq->ref++;
2944 return cfqq;
2945}
2946
2947/**
2948 * cfq_create_cic - create and link a cfq_io_cq
2949 * @cfqd: cfqd of interest
2950 * @gfp_mask: allocation mask
2951 *
2952 * Make sure cfq_io_cq linking %current->io_context and @cfqd exists. If
2953 * ioc and/or cic don't exist, they will be created using @gfp_mask.
2954 */
2955static int cfq_create_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
2956{
2957 struct request_queue *q = cfqd->queue;
2958 struct io_cq *icq = NULL;
2959 struct cfq_io_cq *cic;
2960 struct io_context *ioc;
2961 int ret = -ENOMEM;
2962
2963 might_sleep_if(gfp_mask & __GFP_WAIT);
2964
2965 /* allocate stuff */
2966 ioc = create_io_context(current, gfp_mask, q->node);
2967 if (!ioc)
2968 goto out;
2969
2970 cic = cfq_alloc_cic(cfqd, gfp_mask);
2971 if (!cic)
2972 goto out;
2973 icq = &cic->icq;
2974
2975 ret = radix_tree_preload(gfp_mask);
2976 if (ret)
2977 goto out;
2978
2979 icq->ioc = ioc;
2980 icq->q = cfqd->queue;
2981
2982 /* lock both q and ioc and try to link @icq */
2983 spin_lock_irq(q->queue_lock);
2984 spin_lock(&ioc->lock);
2985
2986 ret = radix_tree_insert(&ioc->icq_tree, q->id, icq);
2987 if (likely(!ret)) {
2988 hlist_add_head(&icq->ioc_node, &ioc->icq_list);
2989 list_add(&icq->q_node, &q->icq_list);
2990 icq = NULL;
2991 } else if (ret == -EEXIST) {
2992 /* someone else already did it */
2993 ret = 0;
2994 }
2995
2996 spin_unlock(&ioc->lock);
2997 spin_unlock_irq(q->queue_lock);
2998
2999 radix_tree_preload_end();
3000out:
3001 if (ret)
3002 printk(KERN_ERR "cfq: icq link failed!\n");
3003 if (icq)
3004 kmem_cache_free(cfq_icq_pool, icq);
3005 return ret;
3006}
3007
3008/**
3009 * cfq_get_cic - acquire cfq_io_cq and bump refcnt on io_context
3010 * @cfqd: cfqd to setup cic for
3011 * @gfp_mask: allocation mask
3012 *
3013 * Return cfq_io_cq associating @cfqd and %current->io_context and
3014 * bump refcnt on io_context. If ioc or cic doesn't exist, they're created
3015 * using @gfp_mask.
3016 *
3017 * Must be called under queue_lock which may be released and re-acquired.
3018 * This function also may sleep depending on @gfp_mask.
3019 */
3020static struct cfq_io_cq *cfq_get_cic(struct cfq_data *cfqd, gfp_t gfp_mask)
3021{
3022 struct request_queue *q = cfqd->queue;
3023 struct cfq_io_cq *cic = NULL;
3024 struct io_context *ioc;
3025 int err;
3026
3027 lockdep_assert_held(q->queue_lock);
3028
3029 while (true) {
3030 /* fast path */
3031 ioc = current->io_context;
3032 if (likely(ioc)) {
3033 cic = cfq_cic_lookup(cfqd, ioc);
3034 if (likely(cic))
3035 break;
3036 }
3037
3038 /* slow path - unlock, create missing ones and retry */
3039 spin_unlock_irq(q->queue_lock);
3040 err = cfq_create_cic(cfqd, gfp_mask);
3041 spin_lock_irq(q->queue_lock);
3042 if (err)
3043 return NULL;
3044 }
3045
3046 /* bump @ioc's refcnt and handle changed notifications */
3047 get_io_context(ioc);
3048
3049 if (unlikely(cic->icq.changed)) {
3050 if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
3051 changed_ioprio(cic);
3052#ifdef CONFIG_CFQ_GROUP_IOSCHED
3053 if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
3054 changed_cgroup(cic);
3055#endif
3056 }
3057
3058 return cic;
3059}
3060
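/*
 * Think time is tracked as a decaying average in fixed point: each new
 * sample keeps 7/8 of the accumulated history, with 256 as the weight of a
 * single sample. A process whose requests consistently arrive t jiffies
 * after the previous completion (with t below twice the idle slice)
 * therefore converges on a ttime_mean of roughly t.
 */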
3061static void
3062__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
3063{
3064 unsigned long elapsed = jiffies - ttime->last_end_request;
3065 elapsed = min(elapsed, 2UL * slice_idle);
3066
3067 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
3068 ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
3069 ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
3070}
3071
3072static void
3073cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3074 struct cfq_io_cq *cic)
3075{
3076 if (cfq_cfqq_sync(cfqq)) {
3077 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
3078 __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
3079 cfqd->cfq_slice_idle);
3080 }
3081#ifdef CONFIG_CFQ_GROUP_IOSCHED
3082 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
3083#endif
3084}
3085
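/*
 * seek_history is a 32-bit shift register: each incoming request shifts in
 * one bit, set when the request looked seeky (a jump of more than
 * CFQQ_SEEK_THR sectors, or a small transfer on non-rotational storage).
 * CFQQ_SEEKY() then reports the queue as seeky once more than 4 of the
 * last 32 samples have that bit set.
 */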
3086static void
3087cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3088 struct request *rq)
3089{
3090 sector_t sdist = 0;
3091 sector_t n_sec = blk_rq_sectors(rq);
3092 if (cfqq->last_request_pos) {
3093 if (cfqq->last_request_pos < blk_rq_pos(rq))
3094 sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3095 else
3096 sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3097 }
3098
3099 cfqq->seek_history <<= 1;
3100 if (blk_queue_nonrot(cfqd->queue))
3101 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3102 else
3103 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3104}
3105
3106/*
3107 * Disable idle window if the process thinks too long or seeks so much that
3108 * it doesn't matter
3109 */
3110static void
3111cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3112 struct cfq_io_cq *cic)
3113{
3114 int old_idle, enable_idle;
3115
3116 /*
3117 * Don't idle for async or idle io prio class
3118 */
3119 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3120 return;
3121
3122 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3123
3124 if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3125 cfq_mark_cfqq_deep(cfqq);
3126
3127 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3128 enable_idle = 0;
3129 else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
3130 !cfqd->cfq_slice_idle ||
3131 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3132 enable_idle = 0;
3133 else if (sample_valid(cic->ttime.ttime_samples)) {
3134 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
3135 enable_idle = 0;
3136 else
3137 enable_idle = 1;
3138 }
3139
3140 if (old_idle != enable_idle) {
3141 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3142 if (enable_idle)
3143 cfq_mark_cfqq_idle_window(cfqq);
3144 else
3145 cfq_clear_cfqq_idle_window(cfqq);
3146 }
3147}
3148
3149/*
3150 * Check if new_cfqq should preempt the currently active queue. Return false
3151 * if not, or if we aren't sure; returning true will cause a preempt.
3152 */
3153static bool
3154cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3155 struct request *rq)
3156{
3157 struct cfq_queue *cfqq;
3158
3159 cfqq = cfqd->active_queue;
3160 if (!cfqq)
3161 return false;
3162
3163 if (cfq_class_idle(new_cfqq))
3164 return false;
3165
3166 if (cfq_class_idle(cfqq))
3167 return true;
3168
3169 /*
3170 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3171 */
3172 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3173 return false;
3174
3175 /*
3176 * if the new request is sync, but the currently running queue is
3177 * not, let the sync request have priority.
3178 */
3179 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3180 return true;
3181
3182 if (new_cfqq->cfqg != cfqq->cfqg)
3183 return false;
3184
3185 if (cfq_slice_used(cfqq))
3186 return true;
3187
3188 /* Allow preemption only if we are idling on sync-noidle tree */
3189 if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3190 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3191 new_cfqq->service_tree->count == 2 &&
3192 RB_EMPTY_ROOT(&cfqq->sort_list))
3193 return true;
3194
3195 /*
3196 * So both queues are sync. Let the new request get disk time if
3197 * it's a metadata request and the current queue is doing regular IO.
3198 */
3199 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
3200 return true;
3201
3202 /*
3203	 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
3204 */
3205 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3206 return true;
3207
3208	/* The active queue is empty and we are not supposed to idle on it; preempt */
3209 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
3210 return true;
3211
3212 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3213 return false;
3214
3215 /*
3216	 * if this request is as good as one we would expect from the
3217 * current cfqq, let it preempt
3218 */
3219 if (cfq_rq_close(cfqd, cfqq, rq))
3220 return true;
3221
3222 return false;
3223}
3224
3225/*
3226 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3227 * let it have half of its nominal slice.
3228 */
3229static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3230{
3231 struct cfq_queue *old_cfqq = cfqd->active_queue;
3232
3233 cfq_log_cfqq(cfqd, cfqq, "preempt");
3234 cfq_slice_expired(cfqd, 1);
3235
3236 /*
3237	 * The workload type has changed; don't save the slice, otherwise the
3238	 * preempt doesn't take effect
3239 */
3240 if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
3241 cfqq->cfqg->saved_workload_slice = 0;
3242
3243 /*
3244	 * Put the new queue at the front of the current list,
3245 * so we know that it will be selected next.
3246 */
3247 BUG_ON(!cfq_cfqq_on_rr(cfqq));
3248
3249 cfq_service_tree_add(cfqd, cfqq, 1);
3250
3251 cfqq->slice_end = 0;
3252 cfq_mark_cfqq_slice_new(cfqq);
3253}
3254
3255/*
3256 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3257 * something we should do about it
3258 */
3259static void
3260cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3261 struct request *rq)
3262{
3263 struct cfq_io_cq *cic = RQ_CIC(rq);
3264
3265 cfqd->rq_queued++;
3266 if (rq->cmd_flags & REQ_PRIO)
3267 cfqq->prio_pending++;
3268
3269 cfq_update_io_thinktime(cfqd, cfqq, cic);
3270 cfq_update_io_seektime(cfqd, cfqq, rq);
3271 cfq_update_idle_window(cfqd, cfqq, cic);
3272
3273 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3274
3275 if (cfqq == cfqd->active_queue) {
3276 /*
3277 * Remember that we saw a request from this process, but
3278 * don't start queuing just yet. Otherwise we risk seeing lots
3279 * of tiny requests, because we disrupt the normal plugging
3280 * and merging. If the request is already larger than a single
3281 * page, let it rip immediately. For that case we assume that
3282 * merging is already done. Ditto for a busy system that
3283 * has other work pending, don't risk delaying until the
3284		 * idle timer unplugs to continue working.
3285 */
3286 if (cfq_cfqq_wait_request(cfqq)) {
3287 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3288 cfqd->busy_queues > 1) {
3289 cfq_del_timer(cfqd, cfqq);
3290 cfq_clear_cfqq_wait_request(cfqq);
3291 __blk_run_queue(cfqd->queue);
3292 } else {
3293 cfq_blkiocg_update_idle_time_stats(
3294 &cfqq->cfqg->blkg);
3295 cfq_mark_cfqq_must_dispatch(cfqq);
3296 }
3297 }
3298 } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3299 /*
3300 * not the active queue - expire current slice if it is
3301		 * idle and has expired its mean thinktime, or this new queue
3302		 * has some old slice time left and is of higher priority, or
3303 * this new queue is RT and the current one is BE
3304 */
3305 cfq_preempt_queue(cfqd, cfqq);
3306 __blk_run_queue(cfqd->queue);
3307 }
3308}
3309
3310static void cfq_insert_request(struct request_queue *q, struct request *rq)
3311{
3312 struct cfq_data *cfqd = q->elevator->elevator_data;
3313 struct cfq_queue *cfqq = RQ_CFQQ(rq);
3314
3315 cfq_log_cfqq(cfqd, cfqq, "insert_request");
3316 cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);
3317
3318 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3319 list_add_tail(&rq->queuelist, &cfqq->fifo);
3320 cfq_add_rq_rb(rq);
3321 cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3322 &cfqd->serving_group->blkg, rq_data_dir(rq),
3323 rq_is_sync(rq));
3324 cfq_rq_enqueued(cfqd, cfqq, rq);
3325}
3326
3327/*
3328 * Update hw_tag based on peak queue depth over 50 samples under
3329 * sufficient load.
3330 */
3331static void cfq_update_hw_tag(struct cfq_data *cfqd)
3332{
3333 struct cfq_queue *cfqq = cfqd->active_queue;
3334
3335 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3336 cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3337
3338 if (cfqd->hw_tag == 1)
3339 return;
3340
3341 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3342 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3343 return;
3344
3345 /*
3346	 * If the active queue doesn't have enough requests and can idle, cfq might not
3347 * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3348 * case
3349 */
3350 if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3351 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3352 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3353 return;
3354
3355 if (cfqd->hw_tag_samples++ < 50)
3356 return;
3357
3358 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3359 cfqd->hw_tag = 1;
3360 else
3361 cfqd->hw_tag = 0;
3362}
3363
3364static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3365{
3366 struct cfq_io_cq *cic = cfqd->active_cic;
3367
3368 /* If the queue already has requests, don't wait */
3369 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3370 return false;
3371
3372 /* If there are other queues in the group, don't wait */
3373 if (cfqq->cfqg->nr_cfqq > 1)
3374 return false;
3375
3376 /* the only queue in the group, but think time is big */
3377 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
3378 return false;
3379
3380 if (cfq_slice_used(cfqq))
3381 return true;
3382
3383 /* if slice left is less than think time, wait busy */
3384 if (cic && sample_valid(cic->ttime.ttime_samples)
3385 && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
3386 return true;
3387
3388 /*
3389	 * If the think time is less than a jiffy then ttime_mean=0 and the above
3390	 * will not be true. It might happen that the slice has not expired yet
3391 * but will expire soon (4-5 ns) during select_queue(). To cover the
3392 * case where think time is less than a jiffy, mark the queue wait
3393 * busy if only 1 jiffy is left in the slice.
3394 */
3395 if (cfqq->slice_end - jiffies == 1)
3396 return true;
3397
3398 return false;
3399}
3400
3401static void cfq_completed_request(struct request_queue *q, struct request *rq)
3402{
3403 struct cfq_queue *cfqq = RQ_CFQQ(rq);
3404 struct cfq_data *cfqd = cfqq->cfqd;
3405 const int sync = rq_is_sync(rq);
3406 unsigned long now;
3407
3408 now = jiffies;
3409 cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3410 !!(rq->cmd_flags & REQ_NOIDLE));
3411
3412 cfq_update_hw_tag(cfqd);
3413
3414 WARN_ON(!cfqd->rq_in_driver);
3415 WARN_ON(!cfqq->dispatched);
3416 cfqd->rq_in_driver--;
3417 cfqq->dispatched--;
3418 (RQ_CFQG(rq))->dispatched--;
3419 cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3420 rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3421 rq_data_dir(rq), rq_is_sync(rq));
3422
3423 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3424
3425 if (sync) {
3426 struct cfq_rb_root *service_tree;
3427
3428 RQ_CIC(rq)->ttime.last_end_request = now;
3429
3430 if (cfq_cfqq_on_rr(cfqq))
3431 service_tree = cfqq->service_tree;
3432 else
3433 service_tree = service_tree_for(cfqq->cfqg,
3434 cfqq_prio(cfqq), cfqq_type(cfqq));
3435 service_tree->ttime.last_end_request = now;
3436 if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3437 cfqd->last_delayed_sync = now;
3438 }
3439
3440#ifdef CONFIG_CFQ_GROUP_IOSCHED
3441 cfqq->cfqg->ttime.last_end_request = now;
3442#endif
3443
3444 /*
3445 * If this is the active queue, check if it needs to be expired,
3446 * or if we want to idle in case it has no pending requests.
3447 */
3448 if (cfqd->active_queue == cfqq) {
3449 const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3450
3451 if (cfq_cfqq_slice_new(cfqq)) {
3452 cfq_set_prio_slice(cfqd, cfqq);
3453 cfq_clear_cfqq_slice_new(cfqq);
3454 }
3455
3456 /*
3457		 * Should we wait for the next request to come in before we expire
3458		 * the queue?
3459 */
3460 if (cfq_should_wait_busy(cfqd, cfqq)) {
3461 unsigned long extend_sl = cfqd->cfq_slice_idle;
3462 if (!cfqd->cfq_slice_idle)
3463 extend_sl = cfqd->cfq_group_idle;
3464 cfqq->slice_end = jiffies + extend_sl;
3465 cfq_mark_cfqq_wait_busy(cfqq);
3466 cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3467 }
3468
3469 /*
3470 * Idling is not enabled on:
3471 * - expired queues
3472 * - idle-priority queues
3473 * - async queues
3474 * - queues with still some requests queued
3475 * - when there is a close cooperator
3476 */
3477 if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3478 cfq_slice_expired(cfqd, 1);
3479 else if (sync && cfqq_empty &&
3480 !cfq_close_cooperator(cfqd, cfqq)) {
3481 cfq_arm_slice_timer(cfqd);
3482 }
3483 }
3484
3485 if (!cfqd->rq_in_driver)
3486 cfq_schedule_dispatch(cfqd);
3487}
3488
3489static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3490{
3491 if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3492 cfq_mark_cfqq_must_alloc_slice(cfqq);
3493 return ELV_MQUEUE_MUST;
3494 }
3495
3496 return ELV_MQUEUE_MAY;
3497}
3498
3499static int cfq_may_queue(struct request_queue *q, int rw)
3500{
3501 struct cfq_data *cfqd = q->elevator->elevator_data;
3502 struct task_struct *tsk = current;
3503 struct cfq_io_cq *cic;
3504 struct cfq_queue *cfqq;
3505
3506 /*
3507 * don't force setup of a queue from here, as a call to may_queue
3508 * does not necessarily imply that a request actually will be queued.
3509 * so just lookup a possibly existing queue, or return 'may queue'
3510 * if that fails
3511 */
3512 cic = cfq_cic_lookup(cfqd, tsk->io_context);
3513 if (!cic)
3514 return ELV_MQUEUE_MAY;
3515
3516 cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3517 if (cfqq) {
3518 cfq_init_prio_data(cfqq, cic->icq.ioc);
3519
3520 return __cfq_may_queue(cfqq);
3521 }
3522
3523 return ELV_MQUEUE_MAY;
3524}
3525
3526/*
3527 * queue lock held here
3528 */
3529static void cfq_put_request(struct request *rq)
3530{
3531 struct cfq_queue *cfqq = RQ_CFQQ(rq);
3532
3533 if (cfqq) {
3534 const int rw = rq_data_dir(rq);
3535
3536 BUG_ON(!cfqq->allocated[rw]);
3537 cfqq->allocated[rw]--;
3538
3539 put_io_context(RQ_CIC(rq)->icq.ioc, cfqq->cfqd->queue);
3540
3541 /* Put down rq reference on cfqg */
3542 cfq_put_cfqg(RQ_CFQG(rq));
3543 rq->elv.priv[0] = NULL;
3544 rq->elv.priv[1] = NULL;
3545
3546 cfq_put_queue(cfqq);
3547 }
3548}
3549
3550static struct cfq_queue *
3551cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
3552 struct cfq_queue *cfqq)
3553{
3554 cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3555 cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3556 cfq_mark_cfqq_coop(cfqq->new_cfqq);
3557 cfq_put_queue(cfqq);
3558 return cic_to_cfqq(cic, 1);
3559}
3560
3561/*
3562 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3563 * was the last process referring to said cfqq.
3564 */
3565static struct cfq_queue *
3566split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
3567{
3568 if (cfqq_process_refs(cfqq) == 1) {
3569 cfqq->pid = current->pid;
3570 cfq_clear_cfqq_coop(cfqq);
3571 cfq_clear_cfqq_split_coop(cfqq);
3572 return cfqq;
3573 }
3574
3575 cic_set_cfqq(cic, NULL, 1);
3576
3577 cfq_put_cooperator(cfqq);
3578
3579 cfq_put_queue(cfqq);
3580 return NULL;
3581}
3582/*
3583 * Allocate cfq data structures associated with this request.
3584 */
3585static int
3586cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3587{
3588 struct cfq_data *cfqd = q->elevator->elevator_data;
3589 struct cfq_io_cq *cic;
3590 const int rw = rq_data_dir(rq);
3591 const bool is_sync = rq_is_sync(rq);
3592 struct cfq_queue *cfqq;
3593
3594 might_sleep_if(gfp_mask & __GFP_WAIT);
3595
3596 spin_lock_irq(q->queue_lock);
3597 cic = cfq_get_cic(cfqd, gfp_mask);
3598 if (!cic)
3599 goto queue_fail;
3600
3601new_queue:
3602 cfqq = cic_to_cfqq(cic, is_sync);
3603 if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3604 cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
3605 cic_set_cfqq(cic, cfqq, is_sync);
3606 } else {
3607 /*
3608 * If the queue was seeky for too long, break it apart.
3609 */
3610 if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3611 cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3612 cfqq = split_cfqq(cic, cfqq);
3613 if (!cfqq)
3614 goto new_queue;
3615 }
3616
3617 /*
3618 * Check to see if this queue is scheduled to merge with
3619 * another, closely cooperating queue. The merging of
3620 * queues happens here as it must be done in process context.
3621 * The reference on new_cfqq was taken in merge_cfqqs.
3622 */
3623 if (cfqq->new_cfqq)
3624 cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3625 }
3626
3627 cfqq->allocated[rw]++;
3628
3629 cfqq->ref++;
3630 rq->elv.icq = &cic->icq;
3631 rq->elv.priv[0] = cfqq;
3632 rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
3633 spin_unlock_irq(q->queue_lock);
3634 return 0;
3635
3636queue_fail:
3637 cfq_schedule_dispatch(cfqd);
3638 spin_unlock_irq(q->queue_lock);
3639 cfq_log(cfqd, "set_request fail");
3640 return 1;
3641}
3642
3643static void cfq_kick_queue(struct work_struct *work)
3644{
3645 struct cfq_data *cfqd =
3646 container_of(work, struct cfq_data, unplug_work);
3647 struct request_queue *q = cfqd->queue;
3648
3649 spin_lock_irq(q->queue_lock);
3650 __blk_run_queue(cfqd->queue);
3651 spin_unlock_irq(q->queue_lock);
3652}
3653
3654/*
3655 * Timer running if the active_queue is currently idling inside its time slice
3656 */
3657static void cfq_idle_slice_timer(unsigned long data)
3658{
3659 struct cfq_data *cfqd = (struct cfq_data *) data;
3660 struct cfq_queue *cfqq;
3661 unsigned long flags;
3662 int timed_out = 1;
3663
3664 cfq_log(cfqd, "idle timer fired");
3665
3666 spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3667
3668 cfqq = cfqd->active_queue;
3669 if (cfqq) {
3670 timed_out = 0;
3671
3672 /*
3673 * We saw a request before the queue expired, let it through
3674 */
3675 if (cfq_cfqq_must_dispatch(cfqq))
3676 goto out_kick;
3677
3678 /*
3679 * expired
3680 */
3681 if (cfq_slice_used(cfqq))
3682 goto expire;
3683
3684 /*
3685		 * only expire and reinvoke the request handler if there are
3686 * other queues with pending requests
3687 */
3688 if (!cfqd->busy_queues)
3689 goto out_cont;
3690
3691 /*
3692 * not expired and it has a request pending, let it dispatch
3693 */
3694 if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3695 goto out_kick;
3696
3697 /*
3698		 * The queue-depth (deep) flag is reset only when idling didn't succeed
3699 */
3700 cfq_clear_cfqq_deep(cfqq);
3701 }
3702expire:
3703 cfq_slice_expired(cfqd, timed_out);
3704out_kick:
3705 cfq_schedule_dispatch(cfqd);
3706out_cont:
3707 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3708}
3709
3710static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3711{
3712 del_timer_sync(&cfqd->idle_slice_timer);
3713 cancel_work_sync(&cfqd->unplug_work);
3714}
3715
3716static void cfq_put_async_queues(struct cfq_data *cfqd)
3717{
3718 int i;
3719
3720 for (i = 0; i < IOPRIO_BE_NR; i++) {
3721 if (cfqd->async_cfqq[0][i])
3722 cfq_put_queue(cfqd->async_cfqq[0][i]);
3723 if (cfqd->async_cfqq[1][i])
3724 cfq_put_queue(cfqd->async_cfqq[1][i]);
3725 }
3726
3727 if (cfqd->async_idle_cfqq)
3728 cfq_put_queue(cfqd->async_idle_cfqq);
3729}
3730
3731static void cfq_exit_queue(struct elevator_queue *e)
3732{
3733 struct cfq_data *cfqd = e->elevator_data;
3734 struct request_queue *q = cfqd->queue;
3735 bool wait = false;
3736
3737 cfq_shutdown_timer_wq(cfqd);
3738
3739 spin_lock_irq(q->queue_lock);
3740
3741 if (cfqd->active_queue)
3742 __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3743
3744 cfq_put_async_queues(cfqd);
3745 cfq_release_cfq_groups(cfqd);
3746
3747 /*
3748 * If there are groups which we could not unlink from blkcg list,
3749	 * wait for an RCU grace period for them to be freed.
3750 */
3751 if (cfqd->nr_blkcg_linked_grps)
3752 wait = true;
3753
3754 spin_unlock_irq(q->queue_lock);
3755
3756 cfq_shutdown_timer_wq(cfqd);
3757
3758 /*
3759 * Wait for cfqg->blkg->key accessors to exit their grace periods.
3760 * Do this wait only if there are other unlinked groups out
3761	 * there. This can happen if the cgroup deletion path claimed the
3762	 * responsibility of cleaning up a group before the queue cleanup code
3763	 * got to the group.
3764 *
3765 * Do not call synchronize_rcu() unconditionally as there are drivers
3766 * which create/delete request queue hundreds of times during scan/boot
3767 * and synchronize_rcu() can take significant time and slow down boot.
3768 */
3769 if (wait)
3770 synchronize_rcu();
3771
3772#ifdef CONFIG_CFQ_GROUP_IOSCHED
3773 /* Free up per cpu stats for root group */
3774 free_percpu(cfqd->root_group.blkg.stats_cpu);
3775#endif
3776 kfree(cfqd);
3777}
3778
3779static void *cfq_init_queue(struct request_queue *q)
3780{
3781 struct cfq_data *cfqd;
3782 int i, j;
3783 struct cfq_group *cfqg;
3784 struct cfq_rb_root *st;
3785
3786 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3787 if (!cfqd)
3788 return NULL;
3789
3790 /* Init root service tree */
3791 cfqd->grp_service_tree = CFQ_RB_ROOT;
3792
3793 /* Init root group */
3794 cfqg = &cfqd->root_group;
3795 for_each_cfqg_st(cfqg, i, j, st)
3796 *st = CFQ_RB_ROOT;
3797 RB_CLEAR_NODE(&cfqg->rb_node);
3798
3799 /* Give preference to root group over other groups */
3800 cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3801
3802#ifdef CONFIG_CFQ_GROUP_IOSCHED
3803 /*
3804 * Set root group reference to 2. One reference will be dropped when
3805 * all groups on cfqd->cfqg_list are being deleted during queue exit.
3806	 * The other reference will remain there, as we don't want to delete this
3807	 * group: it is statically allocated and gets destroyed when
3808	 * cfqd goes away.
3809 */
3810 cfqg->ref = 2;
3811
3812 if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
3813 kfree(cfqg);
3814 kfree(cfqd);
3815 return NULL;
3816 }
3817
3818 rcu_read_lock();
3819
3820 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3821 (void *)cfqd, 0);
3822 rcu_read_unlock();
3823 cfqd->nr_blkcg_linked_grps++;
3824
3825 /* Add group on cfqd->cfqg_list */
3826 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
3827#endif
3828 /*
3829 * Not strictly needed (since RB_ROOT just clears the node and we
3830 * zeroed cfqd on alloc), but better be safe in case someone decides
3831 * to add magic to the rb code
3832 */
3833 for (i = 0; i < CFQ_PRIO_LISTS; i++)
3834 cfqd->prio_trees[i] = RB_ROOT;
3835
3836 /*
3837 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3838 * Grab a permanent reference to it, so that the normal code flow
3839 * will not attempt to free it.
3840 */
3841 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3842 cfqd->oom_cfqq.ref++;
3843 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3844
3845 cfqd->queue = q;
3846
3847 init_timer(&cfqd->idle_slice_timer);
3848 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3849 cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3850
3851 INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3852
3853 cfqd->cfq_quantum = cfq_quantum;
3854 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3855 cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3856 cfqd->cfq_back_max = cfq_back_max;
3857 cfqd->cfq_back_penalty = cfq_back_penalty;
3858 cfqd->cfq_slice[0] = cfq_slice_async;
3859 cfqd->cfq_slice[1] = cfq_slice_sync;
3860 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3861 cfqd->cfq_slice_idle = cfq_slice_idle;
3862 cfqd->cfq_group_idle = cfq_group_idle;
3863 cfqd->cfq_latency = 1;
3864 cfqd->hw_tag = -1;
3865 /*
3866	 * we optimistically start assuming sync ops weren't delayed in the last
3867 * second, in order to have larger depth for async operations.
3868 */
3869 cfqd->last_delayed_sync = jiffies - HZ;
3870 return cfqd;
3871}
3872
3873/*
3874 * sysfs parts below -->
3875 */
3876static ssize_t
3877cfq_var_show(unsigned int var, char *page)
3878{
3879 return sprintf(page, "%d\n", var);
3880}
3881
3882static ssize_t
3883cfq_var_store(unsigned int *var, const char *page, size_t count)
3884{
3885 char *p = (char *) page;
3886
3887 *var = simple_strtoul(p, &p, 10);
3888 return count;
3889}
3890
3891#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
3892static ssize_t __FUNC(struct elevator_queue *e, char *page) \
3893{ \
3894 struct cfq_data *cfqd = e->elevator_data; \
3895 unsigned int __data = __VAR; \
3896 if (__CONV) \
3897 __data = jiffies_to_msecs(__data); \
3898 return cfq_var_show(__data, (page)); \
3899}
3900SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3901SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3902SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3903SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3904SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3905SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3906SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
3907SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3908SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3909SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3910SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3911#undef SHOW_FUNCTION
3912
3913#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
3914static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
3915{ \
3916 struct cfq_data *cfqd = e->elevator_data; \
3917 unsigned int __data; \
3918 int ret = cfq_var_store(&__data, (page), count); \
3919 if (__data < (MIN)) \
3920 __data = (MIN); \
3921 else if (__data > (MAX)) \
3922 __data = (MAX); \
3923 if (__CONV) \
3924 *(__PTR) = msecs_to_jiffies(__data); \
3925 else \
3926 *(__PTR) = __data; \
3927 return ret; \
3928}
3929STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
3930STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
3931 UINT_MAX, 1);
3932STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
3933 UINT_MAX, 1);
3934STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
3935STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
3936 UINT_MAX, 0);
3937STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
3938STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
3939STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
3940STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3941STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3942 UINT_MAX, 0);
3943STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3944#undef STORE_FUNCTION
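/*
 * For illustration only (not part of the original source): the store side
 * mirrors the show side; e.g. cfq_slice_sync_store above expands to roughly
 * the following, clamping the parsed value to a minimum of 1 and converting
 * milliseconds back to jiffies before updating the tunable:
 *
 *	static ssize_t cfq_slice_sync_store(struct elevator_queue *e,
 *					    const char *page, size_t count)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data;
 *		int ret = cfq_var_store(&__data, page, count);
 *
 *		if (__data < 1)
 *			__data = 1;
 *		cfqd->cfq_slice[1] = msecs_to_jiffies(__data);
 *		return ret;
 *	}
 */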
3945
3946#define CFQ_ATTR(name) \
3947 __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
3948
3949static struct elv_fs_entry cfq_attrs[] = {
3950 CFQ_ATTR(quantum),
3951 CFQ_ATTR(fifo_expire_sync),
3952 CFQ_ATTR(fifo_expire_async),
3953 CFQ_ATTR(back_seek_max),
3954 CFQ_ATTR(back_seek_penalty),
3955 CFQ_ATTR(slice_sync),
3956 CFQ_ATTR(slice_async),
3957 CFQ_ATTR(slice_async_rq),
3958 CFQ_ATTR(slice_idle),
3959 CFQ_ATTR(group_idle),
3960 CFQ_ATTR(low_latency),
3961 __ATTR_NULL
3962};
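/*
 * Illustrative note (not part of the original source): when cfq is the
 * active elevator for a queue, these attributes appear as files under
 * /sys/block/<disk>/queue/iosched/ (e.g. .../iosched/slice_idle), readable
 * by everyone and writable only by root per the S_IRUGO|S_IWUSR mode above.
 * Time-based tunables are shown and accepted in milliseconds.
 */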
3963
3964static struct elevator_type iosched_cfq = {
3965 .ops = {
3966 .elevator_merge_fn = cfq_merge,
3967 .elevator_merged_fn = cfq_merged_request,
3968 .elevator_merge_req_fn = cfq_merged_requests,
3969 .elevator_allow_merge_fn = cfq_allow_merge,
3970 .elevator_bio_merged_fn = cfq_bio_merged,
3971 .elevator_dispatch_fn = cfq_dispatch_requests,
3972 .elevator_add_req_fn = cfq_insert_request,
3973 .elevator_activate_req_fn = cfq_activate_request,
3974 .elevator_deactivate_req_fn = cfq_deactivate_request,
3975 .elevator_completed_req_fn = cfq_completed_request,
3976 .elevator_former_req_fn = elv_rb_former_request,
3977 .elevator_latter_req_fn = elv_rb_latter_request,
3978 .elevator_exit_icq_fn = cfq_exit_icq,
3979 .elevator_set_req_fn = cfq_set_request,
3980 .elevator_put_req_fn = cfq_put_request,
3981 .elevator_may_queue_fn = cfq_may_queue,
3982 .elevator_init_fn = cfq_init_queue,
3983 .elevator_exit_fn = cfq_exit_queue,
3984 },
3985 .icq_size = sizeof(struct cfq_io_cq),
3986 .icq_align = __alignof__(struct cfq_io_cq),
3987 .elevator_attrs = cfq_attrs,
3988 .elevator_name = "cfq",
3989 .elevator_owner = THIS_MODULE,
3990};
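/*
 * Aside (not part of the original source): icq_size/icq_align describe the
 * per-(task, queue) struct cfq_io_cq so the generic io-context code can
 * allocate and free it on cfq's behalf; the resulting kmem cache becomes
 * available as iosched_cfq.icq_cache once elv_register() has run (picked up
 * in cfq_init() below), and elevator_exit_icq_fn is cfq's hook into that
 * shared exit path.
 */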
3991
3992#ifdef CONFIG_CFQ_GROUP_IOSCHED
3993static struct blkio_policy_type blkio_policy_cfq = {
3994 .ops = {
3995 .blkio_unlink_group_fn = cfq_unlink_blkio_group,
3996 .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
3997 },
3998 .plid = BLKIO_POLICY_PROP,
3999};
4000#else
4001static struct blkio_policy_type blkio_policy_cfq;
4002#endif
4003
4004static int __init cfq_init(void)
4005{
4006 int ret;
4007
4008 /*
4009	 * could be 0 on HZ < 1000 setups (e.g. HZ == 100 makes HZ / 125 == 0)
4010 */
4011 if (!cfq_slice_async)
4012 cfq_slice_async = 1;
4013 if (!cfq_slice_idle)
4014 cfq_slice_idle = 1;
4015
4016#ifdef CONFIG_CFQ_GROUP_IOSCHED
4017 if (!cfq_group_idle)
4018 cfq_group_idle = 1;
4019#else
4020 cfq_group_idle = 0;
4021#endif
4022 cfq_pool = KMEM_CACHE(cfq_queue, 0);
4023 if (!cfq_pool)
4024 return -ENOMEM;
4025
4026 ret = elv_register(&iosched_cfq);
4027 if (ret) {
4028 kmem_cache_destroy(cfq_pool);
4029 return ret;
4030 }
4031 cfq_icq_pool = iosched_cfq.icq_cache;
4032
4033 blkio_policy_register(&blkio_policy_cfq);
4034
4035 return 0;
4036}
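/*
 * Note (not part of the original source): initialization is ordered as
 * slab cache -> elv_register() -> blkio_policy_register(); on elv_register()
 * failure the cfq_queue cache is destroyed again, and cfq_exit() below
 * unwinds the successful path in reverse order.
 */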
4037
4038static void __exit cfq_exit(void)
4039{
4040 blkio_policy_unregister(&blkio_policy_cfq);
4041 elv_unregister(&iosched_cfq);
4042 kmem_cache_destroy(cfq_pool);
4043}
4044
4045module_init(cfq_init);
4046module_exit(cfq_exit);
4047
4048MODULE_AUTHOR("Jens Axboe");
4049MODULE_LICENSE("GPL");
4050MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");