/*
 * Buffered writeback throttling. Loosely based on CoDel. We can't drop
 * packets for IO scheduling, so the logic is something like this:
 *
 * - Monitor latencies in a defined window of time.
 * - If the minimum latency in the above window exceeds some target, increment
 *   scaling step and scale down queue depth by a factor of 2x. The monitoring
 *   window is then shrunk to 100ms / sqrt(scaling step + 1).
 * - For any window where we don't have solid data on what the latencies
 *   look like, retain status quo.
 * - If latencies look good, decrement scaling step.
 * - If we're only doing writes, allow the scaling step to go negative. This
 *   will temporarily boost write performance, snapping back to a stable
 *   scaling step of 0 if reads show up or the heavy writers finish. Unlike
 *   positive scaling steps where we shrink the monitoring window, a negative
 *   scaling step retains the default step==0 window size.
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>

#include "blk-wbt.h"
#include "blk-rq-qos.h"

#define CREATE_TRACE_POINTS
#include <trace/events/wbt.h>

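/*
 * Small helpers for reading and clearing the wbt accounting flags that
 * are stashed in the request itself (set via wbt_track()).
 */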
static inline void wbt_clear_state(struct request *rq)
{
	rq->wbt_flags = 0;
}

static inline enum wbt_flags wbt_flags(struct request *rq)
{
	return rq->wbt_flags;
}

static inline bool wbt_is_tracked(struct request *rq)
{
	return rq->wbt_flags & WBT_TRACKED;
}

static inline bool wbt_is_read(struct request *rq)
{
	return rq->wbt_flags & WBT_READ;
}

enum {
	/*
	 * Default setting; we'll scale up (to 75% of QD max) or down (min 1)
	 * from here depending on device stats.
	 */
	RWB_DEF_DEPTH		= 16,

	/*
	 * 100msec window
	 */
	RWB_WINDOW_NSEC		= 100 * 1000 * 1000ULL,

	/*
	 * Disregard stats if we don't meet this minimum
	 */
	RWB_MIN_WRITE_SAMPLES	= 3,

	/*
	 * If we have this number of consecutive windows without enough
	 * information to scale up or down, scale up.
	 */
	RWB_UNKNOWN_BUMP	= 5,
};

static inline bool rwb_enabled(struct rq_wb *rwb)
{
	return rwb && rwb->wb_normal != 0;
}

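/*
 * Record the current time in @var, if wbt is enabled. Used to remember
 * the last read completion and last issue timestamps.
 */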
static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
{
	if (rwb_enabled(rwb)) {
		const unsigned long cur = jiffies;

		if (cur != *var)
			*var = cur;
	}
}

/*
 * If a task was rate throttled in balance_dirty_pages() within the last
 * second or so, use that to indicate a higher cleaning rate.
 */
static bool wb_recent_wait(struct rq_wb *rwb)
{
	struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;

	return time_before(jiffies, wb->dirty_sleep + HZ);
}

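/*
 * Pick the wait queue matching the accounting class of this IO: kswapd
 * and discard IO each get their own queue, everything else shares the
 * background queue.
 */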
static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
					  enum wbt_flags wb_acct)
{
	if (wb_acct & WBT_KSWAPD)
		return &rwb->rq_wait[WBT_RWQ_KSWAPD];
	else if (wb_acct & WBT_DISCARD)
		return &rwb->rq_wait[WBT_RWQ_DISCARD];

	return &rwb->rq_wait[WBT_RWQ_BG];
}

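/* Wake up anybody sleeping on any of the wbt wait queues */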
static void rwb_wake_all(struct rq_wb *rwb)
{
	int i;

	for (i = 0; i < WBT_NUM_RWQ; i++) {
		struct rq_wait *rqw = &rwb->rq_wait[i];

		if (wq_has_sleeper(&rqw->wait))
			wake_up_all(&rqw->wait);
	}
}

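/*
 * A tracked IO has completed (or been dropped). Decrement the inflight
 * count for its accounting class, and wake up waiters once we're back
 * below the relevant limit.
 */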
static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct rq_wait *rqw;
	int inflight, limit;

	if (!(wb_acct & WBT_TRACKED))
		return;

	rqw = get_rq_wait(rwb, wb_acct);
	inflight = atomic_dec_return(&rqw->inflight);

	/*
	 * wbt got disabled with IO in flight. Wake up any potential
	 * waiters; we don't have to do more than that.
	 */
	if (unlikely(!rwb_enabled(rwb))) {
		rwb_wake_all(rwb);
		return;
	}

	/*
	 * For discards, our limit is always the background limit. For
	 * writes, if the device does write back caching, drop further
	 * down before we wake people up.
	 */
	if (wb_acct & WBT_DISCARD)
		limit = rwb->wb_background;
	else if (rwb->wc && !wb_recent_wait(rwb))
		limit = 0;
	else
		limit = rwb->wb_normal;

	/*
	 * Don't wake anyone up if we are above the normal limit.
	 */
	if (inflight && inflight >= limit)
		return;

	if (wq_has_sleeper(&rqw->wait)) {
		int diff = limit - inflight;

		if (!inflight || diff >= rwb->wb_background / 2)
			wake_up(&rqw->wait);
	}
}

/*
 * Called on completion of a request. Note that it's also called when
 * a request is merged, at the point where the merged request is freed.
 */
static void wbt_done(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!wbt_is_tracked(rq)) {
		if (rwb->sync_cookie == rq) {
			rwb->sync_issue = 0;
			rwb->sync_cookie = NULL;
		}

		if (wbt_is_read(rq))
			wb_timestamp(rwb, &rwb->last_comp);
	} else {
		WARN_ON_ONCE(rq == rwb->sync_cookie);
		__wbt_done(rqos, wbt_flags(rq));
	}
	wbt_clear_state(rq);
}

static inline bool stat_sample_valid(struct blk_rq_stat *stat)
{
	/*
	 * We need at least one read sample, and a minimum of
	 * RWB_MIN_WRITE_SAMPLES. We require some write samples to know
	 * that it's writes impacting us, and not just some sole read on
	 * a device that is in a lower power state.
	 */
	return (stat[READ].nr_samples >= 1 &&
		stat[WRITE].nr_samples >= RWB_MIN_WRITE_SAMPLES);
}

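/*
 * How long the currently tracked sync issue has been outstanding, in
 * nanoseconds. Returns 0 if we aren't tracking one.
 */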
static u64 rwb_sync_issue_lat(struct rq_wb *rwb)
{
	u64 now, issue = READ_ONCE(rwb->sync_issue);

	if (!issue || !rwb->sync_cookie)
		return 0;

	now = ktime_to_ns(ktime_get());
	return now - issue;
}

enum {
	LAT_OK = 1,
	LAT_UNKNOWN,
	LAT_UNKNOWN_WRITES,
	LAT_EXCEEDED,
};

static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;
	u64 thislat;

	/*
	 * If our stored sync issue exceeds the window size, or it
	 * exceeds our min target AND we haven't logged any entries,
	 * flag the latency as exceeded. wbt works off completion latencies,
	 * but for a flooded device, a single sync IO can take a long time
	 * to complete after being issued. If this time exceeds our
	 * monitoring window AND we didn't see any other completions in that
	 * window, then count that sync IO as a violation of the latency.
	 */
	thislat = rwb_sync_issue_lat(rwb);
	if (thislat > rwb->cur_win_nsec ||
	    (thislat > rwb->min_lat_nsec && !stat[READ].nr_samples)) {
		trace_wbt_lat(bdi, thislat);
		return LAT_EXCEEDED;
	}

	/*
	 * No read/write mix, if the stats aren't valid.
	 */
	if (!stat_sample_valid(stat)) {
		/*
		 * If we had writes in this stat window and the window is
		 * current, we're only doing writes. If a task recently
		 * waited or still has writes in flight, consider us doing
		 * just writes as well.
		 */
		if (stat[WRITE].nr_samples || wb_recent_wait(rwb) ||
		    wbt_inflight(rwb))
			return LAT_UNKNOWN_WRITES;
		return LAT_UNKNOWN;
	}

	/*
	 * If the 'min' latency exceeds our target, step down.
	 */
	if (stat[READ].min > rwb->min_lat_nsec) {
		trace_wbt_lat(bdi, stat[READ].min);
		trace_wbt_stat(bdi, stat);
		return LAT_EXCEEDED;
	}

	if (rqd->scale_step)
		trace_wbt_stat(bdi, stat);

	return LAT_OK;
}

static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
{
	struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
	struct rq_depth *rqd = &rwb->rq_depth;

	trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
			rwb->wb_background, rwb->wb_normal, rqd->max_depth);
}

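/*
 * Derive the background and normal wakeup limits from the current max
 * depth: normal is roughly half of max, background roughly a quarter.
 * For example, max_depth == 16 gives wb_normal == 8 and
 * wb_background == 4.
 */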
static void calc_wb_limits(struct rq_wb *rwb)
{
	if (rwb->min_lat_nsec == 0) {
		rwb->wb_normal = rwb->wb_background = 0;
	} else if (rwb->rq_depth.max_depth <= 2) {
		rwb->wb_normal = rwb->rq_depth.max_depth;
		rwb->wb_background = 1;
	} else {
		rwb->wb_normal = (rwb->rq_depth.max_depth + 1) / 2;
		rwb->wb_background = (rwb->rq_depth.max_depth + 3) / 4;
	}
}

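/*
 * Step the scaling one level up: allow more outstanding IO, recompute
 * the wakeup limits, and reset the "unknown" window counter.
 */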
static void scale_up(struct rq_wb *rwb)
{
	rq_depth_scale_up(&rwb->rq_depth);
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_trace_step(rwb, "scale up");
}

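/*
 * Step the scaling one level down: reduce the allowed depth, recompute
 * the limits, and wake any sleepers so they re-evaluate against the new
 * limits.
 */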
static void scale_down(struct rq_wb *rwb, bool hard_throttle)
{
	rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
	calc_wb_limits(rwb);
	rwb->unknown_cnt = 0;
	rwb_wake_all(rwb);
	rwb_trace_step(rwb, "scale down");
}

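/*
 * (Re-)arm the stats window timer. While we are throttling
 * (scale_step > 0), the monitoring window shrinks to
 * win_nsec / sqrt(scale_step + 1) so we react faster: with the default
 * 100ms window, step 1 gives ~70ms, step 3 gives 50ms, step 7 gives ~35ms.
 */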
static void rwb_arm_timer(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	if (rqd->scale_step > 0) {
		/*
		 * We should speed this up, using some variant of a fast
		 * integer inverse square root calculation. Since we only do
		 * this for every window expiration, it's not a huge deal,
		 * though.
		 */
		rwb->cur_win_nsec = div_u64(rwb->win_nsec << 4,
					int_sqrt((rqd->scale_step + 1) << 8));
	} else {
		/*
		 * For step < 0, we don't want to increase/decrease the
		 * window size.
		 */
		rwb->cur_win_nsec = rwb->win_nsec;
	}

	blk_stat_activate_nsecs(rwb->cb, rwb->cur_win_nsec);
}

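/*
 * Stats window expired: look at the latencies observed in the window and
 * decide whether to scale the queue depth up, down, or leave it alone.
 */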
static void wb_timer_fn(struct blk_stat_callback *cb)
{
	struct rq_wb *rwb = cb->data;
	struct rq_depth *rqd = &rwb->rq_depth;
	unsigned int inflight = wbt_inflight(rwb);
	int status;

	status = latency_exceeded(rwb, cb->stat);

	trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
			inflight);

	/*
	 * If we exceeded the latency target, step down. If we did not,
	 * step one level up. If we don't know enough to say either exceeded
	 * or ok, then don't do anything.
	 */
	switch (status) {
	case LAT_EXCEEDED:
		scale_down(rwb, true);
		break;
	case LAT_OK:
		scale_up(rwb);
		break;
	case LAT_UNKNOWN_WRITES:
		/*
		 * We started at the center step, but don't have a valid
		 * read/write sample. We do have writes going on, so allow
		 * the step to go negative to increase write performance.
		 */
		scale_up(rwb);
		break;
	case LAT_UNKNOWN:
		if (++rwb->unknown_cnt < RWB_UNKNOWN_BUMP)
			break;
		/*
		 * We get here when we previously scaled the depth, and we
		 * currently don't have a valid read/write sample. For that
		 * case, slowly return to the center state (step == 0).
		 */
		if (rqd->scale_step > 0)
			scale_up(rwb);
		else if (rqd->scale_step < 0)
			scale_down(rwb, false);
		break;
	default:
		break;
	}

	/*
	 * Re-arm timer, if we have IO in flight
	 */
	if (rqd->scale_step || inflight)
		rwb_arm_timer(rwb);
}

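/*
 * Reset scaling back to the center step and recompute the depth limits,
 * then wake anyone who may be waiting against the old limits.
 */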
static void __wbt_update_limits(struct rq_wb *rwb)
{
	struct rq_depth *rqd = &rwb->rq_depth;

	rqd->scale_step = 0;
	rqd->scaled_max = false;

	rq_depth_calc_max_depth(rqd);
	calc_wb_limits(rwb);

	rwb_wake_all(rwb);
}

void wbt_update_limits(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	__wbt_update_limits(RQWB(rqos));
}

u64 wbt_get_min_lat(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return 0;
	return RQWB(rqos)->min_lat_nsec;
}

void wbt_set_min_lat(struct request_queue *q, u64 val)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (!rqos)
		return;
	RQWB(rqos)->min_lat_nsec = val;
	RQWB(rqos)->enable_state = WBT_STATE_ON_MANUAL;
	__wbt_update_limits(RQWB(rqos));
}

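/*
 * Return true if we issued or completed unrelated IO within the last
 * 100ms (HZ / 10).
 */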
static bool close_io(struct rq_wb *rwb)
{
	const unsigned long now = jiffies;

	return time_before(now, rwb->last_issue + HZ / 10) ||
		time_before(now, rwb->last_comp + HZ / 10);
}

#define REQ_HIPRIO	(REQ_SYNC | REQ_META | REQ_PRIO)

static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
{
	unsigned int limit;

	/*
	 * If we got disabled, just return UINT_MAX. This ensures that
	 * we'll properly inc a new IO, and dec+wakeup at the end.
	 */
	if (!rwb_enabled(rwb))
		return UINT_MAX;

	if ((rw & REQ_OP_MASK) == REQ_OP_DISCARD)
		return rwb->wb_background;

	/*
	 * At this point we know it's a buffered write. If this is
	 * kswapd trying to free memory, or REQ_SYNC is set, then
	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
	 * that. If the write is marked as a background write, then use
	 * the idle limit, or go to normal if we haven't had competing
	 * IO for a bit.
	 */
	if ((rw & REQ_HIPRIO) || wb_recent_wait(rwb) || current_is_kswapd())
		limit = rwb->rq_depth.max_depth;
	else if ((rw & REQ_BACKGROUND) || close_io(rwb)) {
		/*
		 * If less than 100ms since we completed unrelated IO,
		 * limit us to half the depth for background writeback.
		 */
		limit = rwb->wb_background;
	} else
		limit = rwb->wb_normal;

	return limit;
}

/*
 * Block if we will exceed our limit, or if we are currently waiting for
 * the timer to kick off queuing again.
 */
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
		       unsigned long rw, spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
	DECLARE_WAITQUEUE(wait, current);
	bool has_sleeper;

	has_sleeper = wq_has_sleeper(&rqw->wait);
	if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
		return;

	add_wait_queue_exclusive(&rqw->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
			break;

		if (lock) {
			spin_unlock_irq(lock);
			io_schedule();
			spin_lock_irq(lock);
		} else
			io_schedule();
		has_sleeper = false;
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&rqw->wait, &wait);
}

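/*
 * Only buffered writes and discards are throttled; O_DIRECT writes
 * (REQ_SYNC | REQ_IDLE) and everything else pass through untracked.
 */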
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_WRITE:
		/*
		 * Don't throttle WRITE_ODIRECT
		 */
		if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==
		    (REQ_SYNC | REQ_IDLE))
			return false;
		/* fallthrough */
	case REQ_OP_DISCARD:
		return true;
	default:
		return false;
	}
}

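/*
 * Map a bio to the wbt accounting flags it should carry: reads are
 * marked WBT_READ, throttled IO is marked WBT_TRACKED (plus WBT_KSWAPD
 * or WBT_DISCARD where appropriate).
 */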
static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)
{
	enum wbt_flags flags = 0;

	if (!rwb_enabled(rwb))
		return 0;

	if (bio_op(bio) == REQ_OP_READ) {
		flags = WBT_READ;
	} else if (wbt_should_throttle(rwb, bio)) {
		if (current_is_kswapd())
			flags |= WBT_KSWAPD;
		if (bio_op(bio) == REQ_OP_DISCARD)
			flags |= WBT_DISCARD;
		flags |= WBT_TRACKED;
	}
	return flags;
}

static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);
	__wbt_done(rqos, flags);
}

/*
 * May sleep, if we have exceeded the writeback limits. Caller can pass
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
{
	struct rq_wb *rwb = RQWB(rqos);
	enum wbt_flags flags;

	flags = bio_to_wbt_flags(rwb, bio);
	if (!(flags & WBT_TRACKED)) {
		if (flags & WBT_READ)
			wb_timestamp(rwb, &rwb->last_issue);
		return;
	}

	if (current_is_kswapd())
		flags |= WBT_KSWAPD;
	if (bio_op(bio) == REQ_OP_DISCARD)
		flags |= WBT_DISCARD;

	__wbt_wait(rwb, flags, bio->bi_opf, lock);

	if (!blk_stat_is_active(rwb->cb))
		rwb_arm_timer(rwb);
}

static void wbt_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
	struct rq_wb *rwb = RQWB(rqos);
	rq->wbt_flags |= bio_to_wbt_flags(rwb, bio);
}

void wbt_issue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);

	if (!rwb_enabled(rwb))
		return;

	/*
	 * Track the sync issue, so we can react more quickly if a sync IO
	 * takes a long time to complete. Note that this is just a hint. The
	 * request can go away when it completes, so it's important we never
	 * dereference it. We only use the address to compare with, which is
	 * why we store the sync_issue time locally.
	 */
	if (wbt_is_read(rq) && !rwb->sync_issue) {
		rwb->sync_cookie = rq;
		rwb->sync_issue = rq->io_start_time_ns;
	}
}

void wbt_requeue(struct rq_qos *rqos, struct request *rq)
{
	struct rq_wb *rwb = RQWB(rqos);
	if (!rwb_enabled(rwb))
		return;
	if (rq == rwb->sync_cookie) {
		rwb->sync_issue = 0;
		rwb->sync_cookie = NULL;
	}
}


void wbt_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos) {
		RQWB(rqos)->rq_depth.queue_depth = depth;
		__wbt_update_limits(RQWB(rqos));
	}
}

void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	if (rqos)
		RQWB(rqos)->wc = write_cache_on;
}

/*
 * Enable wbt if defaults are configured that way
 */
void wbt_enable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	/* Throttling already enabled? */
	if (rqos)
		return;

	/* Queue not registered? Maybe shutting down... */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
	    (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
		wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);

u64 wbt_default_latency_nsec(struct request_queue *q)
{
	/*
	 * We default to 2msec for non-rotational storage, and 75msec
	 * for rotational storage.
	 */
	if (blk_queue_nonrot(q))
		return 2000000ULL;
	else
		return 75000000ULL;
}

static int wbt_data_dir(const struct request *rq)
{
	const int op = req_op(rq);

	if (op == REQ_OP_READ)
		return READ;
	else if (op_is_write(op))
		return WRITE;

	/* don't account */
	return -1;
}

static void wbt_exit(struct rq_qos *rqos)
{
	struct rq_wb *rwb = RQWB(rqos);
	struct request_queue *q = rqos->q;

	blk_stat_remove_callback(q, rwb->cb);
	blk_stat_free_callback(rwb->cb);
	kfree(rwb);
}

/*
 * Disable wbt, if it was enabled by default.
 */
void wbt_disable_default(struct request_queue *q)
{
	struct rq_qos *rqos = wbt_rq_qos(q);
	struct rq_wb *rwb;
	if (!rqos)
		return;
	rwb = RQWB(rqos);
	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->wb_normal = 0;
}
EXPORT_SYMBOL_GPL(wbt_disable_default);

static struct rq_qos_ops wbt_rqos_ops = {
	.throttle = wbt_wait,
	.issue = wbt_issue,
	.track = wbt_track,
	.requeue = wbt_requeue,
	.done = wbt_done,
	.cleanup = wbt_cleanup,
	.exit = wbt_exit,
};

int wbt_init(struct request_queue *q)
{
	struct rq_wb *rwb;
	int i;

	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
	if (!rwb)
		return -ENOMEM;

	rwb->cb = blk_stat_alloc_callback(wb_timer_fn, wbt_data_dir, 2, rwb);
	if (!rwb->cb) {
		kfree(rwb);
		return -ENOMEM;
	}

	for (i = 0; i < WBT_NUM_RWQ; i++)
		rq_wait_init(&rwb->rq_wait[i]);

	rwb->rqos.id = RQ_QOS_WBT;
	rwb->rqos.ops = &wbt_rqos_ops;
	rwb->rqos.q = q;
	rwb->last_comp = rwb->last_issue = jiffies;
	rwb->win_nsec = RWB_WINDOW_NSEC;
	rwb->enable_state = WBT_STATE_ON_DEFAULT;
	rwb->wc = 1;
	rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
	__wbt_update_limits(rwb);

	/*
	 * Assign rwb and add the stats callback.
	 */
	rq_qos_add(q, &rwb->rqos);
	blk_stat_add_callback(q, rwb->cb);

	rwb->min_lat_nsec = wbt_default_latency_nsec(q);

	wbt_set_queue_depth(q, blk_queue_depth(q));
	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));

	return 0;
}