1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
 * One challenge of controlling IO resources is the lack of a trivially
 * observable cost metric. This is distinguished from CPU and memory where
 * wallclock time and the number of bytes can serve as accurate enough
 * resource metrics.
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
 * implement a reasonable work-conserving proportional IO resource
 * distribution.
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
 * characteristics of a wide variety of devices well enough. Default
 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
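 *
 * As a rough, illustrative sketch of the linear model (hypothetical
 * numbers, not the builtin defaults): if a random IO carries a base cost
 * of 1ms and each 4k page adds 10us, a 64k (16 page) random read is
 * charged about 1ms + 16 * 10us = 1.16ms of device time, i.e. the device
 * is expected to sustain roughly 860 such IOs per second.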
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
54 * 2-1. Vtime Distribution
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
 *
 *          root
 *        /       \
 *       A (w:100)  B (w:300)
 *      /       \
 *  A0 (w:100)  A1 (w:100)
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
 * up to 1 (HWEIGHT_WHOLE).
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO iff doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
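 *
 * In rough pseudo-code (a sketch, not the exact implementation in
 * ioc_rqos_throttle()), the issue-time check boils down to:
 *
 *	if (cgroup_vtime + cost_of(bio) <= device_vnow)
 *		issue immediately;
 *	else
 *		wait until device_vnow has advanced far enough;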
84 * 2-2. Vrate Adjustment
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
 * but the device isn't saturated, we're issuing too few and should
 * increase.
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
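 *
 * Concretely, the device vtime at any point is derived as (see ioc_now()
 * below):
 *
 *	vnow = period_at_vtime + (wallclock_now - period_at) * vrate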
 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
 * hardware and software queues are filled up, and is used as the default
 * busy signal.
113 * As devices can have deep queues and be unfair in how the queued commands
 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth lossage. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
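 *
 * An illustrative (not prescriptive) configuration line for a device at
 * MAJ:MIN 8:16 could be:
 *
 *	8:16 enable=1 ctrl=user rpct=95.00 rlat=10000 wpct=95.00 wlat=20000
 *	     min=50.00 max=200.00
 *
 * which considers the device saturated once the 95th percentile read
 * completion latency exceeds 10ms (or write exceeds 20ms) and lets vrate
 * float between 50% and 200%.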
125 * 2-3. Work Conservation
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
 * compared to free-for-all competition. This is too high a cost to pay
 * for IO control.
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, squaring who can
149 * donate and should take back how much requires hweight propagations
 * anyway, making it easier to implement and understand as a separate
 * mechanism.
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
 * https://github.com/osandov/drgn. The output looks like the following.
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% del_ms usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0*000 066:079:077
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - del_ms : Deferred issuer delay induction level and duration
172 * - usages : Usage history
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <linux/blk-cgroup.h>
182 #include "blk-rq-qos.h"
183 #include "blk-stat.h"
186 #ifdef CONFIG_TRACEPOINTS
188 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
189 #define TRACE_IOCG_PATH_LEN 1024
190 static DEFINE_SPINLOCK(trace_iocg_path_lock);
191 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
193 #define TRACE_IOCG_PATH(type, iocg, ...) \
195 unsigned long flags; \
196 if (trace_iocost_##type##_enabled()) { \
197 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
198 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
199 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
200 trace_iocost_##type(iocg, trace_iocg_path, \
202 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
#else /* CONFIG_TRACEPOINTS */
#define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
#endif /* CONFIG_TRACEPOINTS */
213 /* timer period is calculated from latency requirements, bound it */
214 MIN_PERIOD = USEC_PER_MSEC,
215 MAX_PERIOD = USEC_PER_SEC,
218 * A cgroup's vtime can run 50% behind the device vtime, which
219 * serves as its IO credit buffer. Surplus weight adjustment is
220 * immediately canceled if the vtime margin runs below 10%.
223 INUSE_MARGIN_PCT = 10,
225 /* Have some play in waitq timer operations */
226 WAITQ_TIMER_MARGIN_PCT = 5,
229 * vtime can wrap well within a reasonable uptime when vrate is
230 * consistently raised. Don't trust recorded cgroup vtime if the
231 * period counter indicates that it's older than 5mins.
233 VTIME_VALID_DUR = 300 * USEC_PER_SEC,
236 * Remember the past three non-zero usages and use the max for
237 * surplus calculation. Three slots guarantee that we remember one
238 * full period usage from the last active stretch even after
239 * partial deactivation and re-activation periods. Don't start
240 * giving away weight before collecting two data points to prevent
241 * hweight adjustments based on one partial activation period.
244 MIN_VALID_USAGES = 2,
246 /* 1/64k is granular enough and can easily be handled w/ u32 */
247 HWEIGHT_WHOLE = 1 << 16,
250 * As vtime is used to calculate the cost of each IO, it needs to
251 * be fairly high precision. For example, it should be able to
252 * represent the cost of a single page worth of discard with
 * sufficient accuracy. At the same time, it should be able to
254 * represent reasonably long enough durations to be useful and
255 * convenient during operation.
257 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
258 * granularity and days of wrap-around time even at extreme vrates.
260 VTIME_PER_SEC_SHIFT = 37,
261 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
262 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
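/*
 * For scale (derived from the constants above): VTIME_PER_USEC is
 * 2^37 / 10^6, roughly 137k vtime units per microsecond, and a u64
 * vtime at 100% vrate wraps only after 2^27 seconds (over 4 years);
 * even at the 10000% vrate ceiling that's still about two weeks.
 */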
264 /* bound vrate adjustments within two orders of magnitude */
265 VRATE_MIN_PPM = 10000, /* 1% */
266 VRATE_MAX_PPM = 100000000, /* 10000% */
268 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
269 VRATE_CLAMP_ADJ_PCT = 4,
271 /* if IOs end up waiting for requests, issue less */
272 RQ_WAIT_BUSY_PCT = 5,
/* unbusy hysteresis */
277 /* don't let cmds which take a very long time pin lagging for too long */
278 MAX_LAGGING_PERIODS = 10,
281 * If usage% * 1.25 + 2% is lower than hweight% by more than 3%,
282 * donate the surplus.
284 SURPLUS_SCALE_PCT = 125, /* * 125% */
285 SURPLUS_SCALE_ABS = HWEIGHT_WHOLE / 50, /* + 2% */
286 SURPLUS_MIN_ADJ_DELTA = HWEIGHT_WHOLE / 33, /* 3% */
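/*
 * Worked example of the above: an iocg with hweight_inuse of 50% whose
 * measured usage is only 20% would be adjusted towards
 * 20% * 125% + 2% = 27%; since 27% + 3% is still below 50%, the
 * remaining ~23% is donated to other active cgroups.
 */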
288 /* switch iff the conditions are met for longer than this */
289 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
 * Count IO size in 4k pages. The 12bit shift helps keep the
 * size-proportional components of the cost calculation within a
 * similar number of digits as the per-IO cost components.
297 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
298 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
/* if IOs are further than 16M apart, consider them random for the linear model */
301 LCOEF_RANDIO_PAGES = 4096,
310 /* io.cost.qos controls including per-dev enable of the whole controller */
317 /* io.cost.qos params */
328 /* io.cost.model controls */
335 /* builtin linear cost model coefficients */
367 u32 qos[NR_QOS_PARAMS];
368 u64 i_lcoefs[NR_I_LCOEFS];
369 u64 lcoefs[NR_LCOEFS];
370 u32 too_fast_vrate_pct;
371 u32 too_slow_vrate_pct;
381 struct ioc_pcpu_stat {
382 struct ioc_missed missed[2];
394 struct ioc_params params;
401 struct timer_list timer;
402 struct list_head active_iocgs; /* active cgroups */
403 struct ioc_pcpu_stat __percpu *pcpu_stat;
405 enum ioc_running running;
406 atomic64_t vtime_rate;
408 seqcount_t period_seqcount;
409 u32 period_at; /* wallclock starttime */
410 u64 period_at_vtime; /* vtime starttime */
412 atomic64_t cur_period; /* inc'd each period */
413 int busy_level; /* saturation history */
415 u64 inuse_margin_vtime;
416 bool weights_updated;
417 atomic_t hweight_gen; /* for lazy hweights */
419 u64 autop_too_fast_at;
420 u64 autop_too_slow_at;
422 bool user_qos_params:1;
423 bool user_cost_model:1;
426 /* per device-cgroup pair */
428 struct blkg_policy_data pd;
432 * A iocg can get its weight from two sources - an explicit
433 * per-device-cgroup configuration or the default weight of the
434 * cgroup. `cfg_weight` is the explicit per-device-cgroup
435 * configuration. `weight` is the effective considering both
438 * When an idle cgroup becomes active its `active` goes from 0 to
439 * `weight`. `inuse` is the surplus adjusted active weight.
440 * `active` and `inuse` are used to calculate `hweight_active` and
443 * `last_inuse` remembers `inuse` while an iocg is idle to persist
444 * surplus adjustments.
452 sector_t cursor; /* to detect randio */
455 * `vtime` is this iocg's vtime cursor which progresses as IOs are
456 * issued. If lagging behind device vtime, the delta represents
 * the currently available IO budget. If running ahead, the
460 * `vtime_done` is the same but progressed on completion rather
461 * than issue. The delta behind `vtime` represents the cost of
462 * currently in-flight IOs.
464 * `last_vtime` is used to remember `vtime` at the end of the last
465 * period to calculate utilization.
468 atomic64_t done_vtime;
472 * The period this iocg was last active in. Used for deactivation
473 * and invalidating `vtime`.
475 atomic64_t active_period;
476 struct list_head active_list;
478 /* see __propagate_active_weight() and current_hweight() for details */
479 u64 child_active_sum;
486 struct wait_queue_head waitq;
487 struct hrtimer waitq_timer;
488 struct hrtimer delay_timer;
490 /* usage is recorded as fractions of HWEIGHT_WHOLE */
492 u32 usages[NR_USAGE_SLOTS];
494 /* this iocg's depth in the hierarchy and ancestors including self */
496 struct ioc_gq *ancestors[];
501 struct blkcg_policy_data cpd;
502 unsigned int dfl_weight;
513 struct wait_queue_entry wait;
519 struct iocg_wake_ctx {
525 static const struct ioc_params autop[] = {
528 [QOS_RLAT] = 50000, /* 50ms */
530 [QOS_MIN] = VRATE_MIN_PPM,
531 [QOS_MAX] = VRATE_MAX_PPM,
534 [I_LCOEF_RBPS] = 174019176,
535 [I_LCOEF_RSEQIOPS] = 41708,
536 [I_LCOEF_RRANDIOPS] = 370,
537 [I_LCOEF_WBPS] = 178075866,
538 [I_LCOEF_WSEQIOPS] = 42705,
539 [I_LCOEF_WRANDIOPS] = 378,
544 [QOS_RLAT] = 25000, /* 25ms */
546 [QOS_MIN] = VRATE_MIN_PPM,
547 [QOS_MAX] = VRATE_MAX_PPM,
550 [I_LCOEF_RBPS] = 245855193,
551 [I_LCOEF_RSEQIOPS] = 61575,
552 [I_LCOEF_RRANDIOPS] = 6946,
553 [I_LCOEF_WBPS] = 141365009,
554 [I_LCOEF_WSEQIOPS] = 33716,
555 [I_LCOEF_WRANDIOPS] = 26796,
560 [QOS_RLAT] = 25000, /* 25ms */
562 [QOS_MIN] = VRATE_MIN_PPM,
563 [QOS_MAX] = VRATE_MAX_PPM,
566 [I_LCOEF_RBPS] = 488636629,
567 [I_LCOEF_RSEQIOPS] = 8932,
568 [I_LCOEF_RRANDIOPS] = 8518,
569 [I_LCOEF_WBPS] = 427891549,
570 [I_LCOEF_WSEQIOPS] = 28755,
571 [I_LCOEF_WRANDIOPS] = 21940,
573 .too_fast_vrate_pct = 500,
577 [QOS_RLAT] = 5000, /* 5ms */
579 [QOS_MIN] = VRATE_MIN_PPM,
580 [QOS_MAX] = VRATE_MAX_PPM,
583 [I_LCOEF_RBPS] = 3102524156LLU,
584 [I_LCOEF_RSEQIOPS] = 724816,
585 [I_LCOEF_RRANDIOPS] = 778122,
586 [I_LCOEF_WBPS] = 1742780862LLU,
587 [I_LCOEF_WSEQIOPS] = 425702,
588 [I_LCOEF_WRANDIOPS] = 443193,
590 .too_slow_vrate_pct = 10,
595 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
596 * vtime credit shortage and down on device saturation.
598 static u32 vrate_adj_pct[] =
600 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
601 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
602 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
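/*
 * For example, a busy_level of -3 indexes vrate_adj_pct[3] == 1 and raises
 * vrate by 1% for the period, while a sustained busy_level of +40 indexes
 * vrate_adj_pct[40] == 8 and lowers vrate to 92% of its current value
 * (see ioc_timer_fn()).
 */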
604 static struct blkcg_policy blkcg_policy_iocost;
606 /* accessors and helpers */
607 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
609 return container_of(rqos, struct ioc, rqos);
612 static struct ioc *q_to_ioc(struct request_queue *q)
614 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
617 static const char *q_name(struct request_queue *q)
619 if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
620 return kobject_name(q->kobj.parent);
625 static const char __maybe_unused *ioc_name(struct ioc *ioc)
627 return q_name(ioc->rqos.q);
630 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
632 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
635 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
637 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
640 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
642 return pd_to_blkg(&iocg->pd);
645 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
647 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
648 struct ioc_cgrp, cpd);
652 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
653 * weight, the more expensive each IO.
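 *
 * For example (illustrative numbers): with @hw_inuse at HWEIGHT_WHOLE / 2
 * (50%), an IO with an absolute cost of 1ms worth of vtime is charged 2ms
 * against the cgroup's budget.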
655 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
657 return DIV64_U64_ROUND_UP(abs_cost * HWEIGHT_WHOLE, hw_inuse);
660 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost)
662 bio->bi_iocost_cost = cost;
663 atomic64_add(cost, &iocg->vtime);
666 #define CREATE_TRACE_POINTS
667 #include <trace/events/iocost.h>
/* latency QoS params changed, update period_us and all the dependent params */
670 static void ioc_refresh_period_us(struct ioc *ioc)
672 u32 ppm, lat, multi, period_us;
674 lockdep_assert_held(&ioc->lock);
676 /* pick the higher latency target */
677 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
678 ppm = ioc->params.qos[QOS_RPPM];
679 lat = ioc->params.qos[QOS_RLAT];
681 ppm = ioc->params.qos[QOS_WPPM];
682 lat = ioc->params.qos[QOS_WLAT];
686 * We want the period to be long enough to contain a healthy number
687 * of IOs while short enough for granular control. Define it as a
688 * multiple of the latency target. Ideally, the multiplier should
689 * be scaled according to the percentile so that it would nominally
690 * contain a certain number of requests. Let's be simpler and
691 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
694 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
697 period_us = multi * lat;
698 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
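/*
 * Illustrative numbers: rpct=95.00 (ppm = 950000) with rlat=10000us
 * yields multi = max((1000000 - 950000) / 50000, 2) = 2, i.e. a 20ms
 * period, comfortably inside the [MIN_PERIOD, MAX_PERIOD] clamp.
 */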
700 /* calculate dependent params */
701 ioc->period_us = period_us;
702 ioc->margin_us = period_us * MARGIN_PCT / 100;
703 ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
704 period_us * VTIME_PER_USEC * INUSE_MARGIN_PCT, 100);
707 static int ioc_autop_idx(struct ioc *ioc)
709 int idx = ioc->autop_idx;
710 const struct ioc_params *p = &autop[idx];
715 if (!blk_queue_nonrot(ioc->rqos.q))
718 /* handle SATA SSDs w/ broken NCQ */
719 if (blk_queue_depth(ioc->rqos.q) == 1)
720 return AUTOP_SSD_QD1;
722 /* use one of the normal ssd sets */
723 if (idx < AUTOP_SSD_DFL)
724 return AUTOP_SSD_DFL;
726 /* if user is overriding anything, maintain what was there */
727 if (ioc->user_qos_params || ioc->user_cost_model)
730 /* step up/down based on the vrate */
731 vrate_pct = div64_u64(atomic64_read(&ioc->vtime_rate) * 100,
733 now_ns = ktime_get_ns();
735 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
736 if (!ioc->autop_too_fast_at)
737 ioc->autop_too_fast_at = now_ns;
738 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
741 ioc->autop_too_fast_at = 0;
744 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
745 if (!ioc->autop_too_slow_at)
746 ioc->autop_too_slow_at = now_ns;
747 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
750 ioc->autop_too_slow_at = 0;
 * Take the following as input
759 * @bps maximum sequential throughput
760 * @seqiops maximum sequential 4k iops
761 * @randiops maximum random 4k iops
763 * and calculate the linear model cost coefficients.
765 * *@page per-page cost 1s / (@bps / 4096)
766 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
 * *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
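 *
 * Illustrative numbers for a hypothetical device: @bps = 400MB/s gives
 * *@page = 1s / (400M / 4096), roughly 10us worth of vtime per page, and
 * @randiops = 10000 gives *@randio of about max(100us - 10us, 0) = 90us
 * per random IO.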
769 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
770 u64 *page, u64 *seqio, u64 *randio)
774 *page = *seqio = *randio = 0;
777 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
778 DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
781 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
787 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
793 static void ioc_refresh_lcoefs(struct ioc *ioc)
795 u64 *u = ioc->params.i_lcoefs;
796 u64 *c = ioc->params.lcoefs;
798 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
799 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
800 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
801 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
804 static bool ioc_refresh_params(struct ioc *ioc, bool force)
806 const struct ioc_params *p;
809 lockdep_assert_held(&ioc->lock);
811 idx = ioc_autop_idx(ioc);
814 if (idx == ioc->autop_idx && !force)
817 if (idx != ioc->autop_idx)
818 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
820 ioc->autop_idx = idx;
821 ioc->autop_too_fast_at = 0;
822 ioc->autop_too_slow_at = 0;
824 if (!ioc->user_qos_params)
825 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
826 if (!ioc->user_cost_model)
827 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
829 ioc_refresh_period_us(ioc);
830 ioc_refresh_lcoefs(ioc);
832 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
833 VTIME_PER_USEC, MILLION);
834 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
835 VTIME_PER_USEC, MILLION);
840 /* take a snapshot of the current [v]time and vrate */
841 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
now->now_ns = ktime_get_ns();
846 now->now = ktime_to_us(now->now_ns);
847 now->vrate = atomic64_read(&ioc->vtime_rate);
850 * The current vtime is
852 * vtime at period start + (wallclock time since the start) * vrate
854 * As a consistent snapshot of `period_at_vtime` and `period_at` is
855 * needed, they're seqcount protected.
858 seq = read_seqcount_begin(&ioc->period_seqcount);
859 now->vnow = ioc->period_at_vtime +
860 (now->now - ioc->period_at) * now->vrate;
861 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
864 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
866 lockdep_assert_held(&ioc->lock);
867 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
869 write_seqcount_begin(&ioc->period_seqcount);
870 ioc->period_at = now->now;
871 ioc->period_at_vtime = now->vnow;
872 write_seqcount_end(&ioc->period_seqcount);
874 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
875 add_timer(&ioc->timer);
879 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
880 * weight sums and propagate upwards accordingly.
882 static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
884 struct ioc *ioc = iocg->ioc;
887 lockdep_assert_held(&ioc->lock);
889 inuse = min(active, inuse);
891 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
892 struct ioc_gq *parent = iocg->ancestors[lvl];
893 struct ioc_gq *child = iocg->ancestors[lvl + 1];
894 u32 parent_active = 0, parent_inuse = 0;
896 /* update the level sums */
897 parent->child_active_sum += (s32)(active - child->active);
898 parent->child_inuse_sum += (s32)(inuse - child->inuse);
/* apply the updates */
900 child->active = active;
901 child->inuse = inuse;
904 * The delta between inuse and active sums indicates that
905 * that much of weight is being given away. Parent's inuse
906 * and active should reflect the ratio.
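 *
 * For example, if the children's active weights sum to 200 while their
 * inuse weights sum to only 150, the parent advertises inuse as 3/4 of
 * its own weight to its parent in turn.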
908 if (parent->child_active_sum) {
909 parent_active = parent->weight;
910 parent_inuse = DIV64_U64_ROUND_UP(
911 parent_active * parent->child_inuse_sum,
912 parent->child_active_sum);
915 /* do we need to keep walking up? */
916 if (parent_active == parent->active &&
917 parent_inuse == parent->inuse)
920 active = parent_active;
921 inuse = parent_inuse;
924 ioc->weights_updated = true;
927 static void commit_active_weights(struct ioc *ioc)
929 lockdep_assert_held(&ioc->lock);
931 if (ioc->weights_updated) {
932 /* paired with rmb in current_hweight(), see there */
934 atomic_inc(&ioc->hweight_gen);
935 ioc->weights_updated = false;
939 static void propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse)
941 __propagate_active_weight(iocg, active, inuse);
942 commit_active_weights(iocg->ioc);
945 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
947 struct ioc *ioc = iocg->ioc;
952 /* hot path - if uptodate, use cached */
953 ioc_gen = atomic_read(&ioc->hweight_gen);
954 if (ioc_gen == iocg->hweight_gen)
958 * Paired with wmb in commit_active_weights(). If we saw the
959 * updated hweight_gen, all the weight updates from
960 * __propagate_active_weight() are visible too.
962 * We can race with weight updates during calculation and get it
963 * wrong. However, hweight_gen would have changed and a future
 * reader will recalculate and we're guaranteed to discard the
 * stale result soon.
969 hwa = hwi = HWEIGHT_WHOLE;
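/*
 * Walk from the root towards this iocg scaling by the active and inuse
 * ratios at each level. Using the hierarchy in the header comment as a
 * worked example: A (active 100) next to B (active 300) gets
 * HWEIGHT_WHOLE * 100 / 400 = 25%, and A0 (active 100) next to an equal
 * sibling ends up with 25% * 100 / 200 = 12.5%.
 */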
970 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
971 struct ioc_gq *parent = iocg->ancestors[lvl];
972 struct ioc_gq *child = iocg->ancestors[lvl + 1];
973 u32 active_sum = READ_ONCE(parent->child_active_sum);
974 u32 inuse_sum = READ_ONCE(parent->child_inuse_sum);
975 u32 active = READ_ONCE(child->active);
976 u32 inuse = READ_ONCE(child->inuse);
978 /* we can race with deactivations and either may read as zero */
979 if (!active_sum || !inuse_sum)
982 active_sum = max(active, active_sum);
983 hwa = hwa * active / active_sum; /* max 16bits * 10000 */
985 inuse_sum = max(inuse, inuse_sum);
986 hwi = hwi * inuse / inuse_sum; /* max 16bits * 10000 */
989 iocg->hweight_active = max_t(u32, hwa, 1);
990 iocg->hweight_inuse = max_t(u32, hwi, 1);
991 iocg->hweight_gen = ioc_gen;
994 *hw_activep = iocg->hweight_active;
996 *hw_inusep = iocg->hweight_inuse;
999 static void weight_updated(struct ioc_gq *iocg)
1001 struct ioc *ioc = iocg->ioc;
1002 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1003 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1006 lockdep_assert_held(&ioc->lock);
1008 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1009 if (weight != iocg->weight && iocg->active)
1010 propagate_active_weight(iocg, weight,
1011 DIV64_U64_ROUND_UP(iocg->inuse * weight, iocg->weight));
1012 iocg->weight = weight;
1015 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1017 struct ioc *ioc = iocg->ioc;
1018 u64 last_period, cur_period, max_period_delta;
1019 u64 vtime, vmargin, vmin;
 * If we already seem to be active, just update the stamp to tell the
 * timer that we're still active. We don't mind occasional races.
1026 if (!list_empty(&iocg->active_list)) {
1028 cur_period = atomic64_read(&ioc->cur_period);
1029 if (atomic64_read(&iocg->active_period) != cur_period)
1030 atomic64_set(&iocg->active_period, cur_period);
1034 /* racy check on internal node IOs, treat as root level IOs */
1035 if (iocg->child_active_sum)
1038 spin_lock_irq(&ioc->lock);
1043 cur_period = atomic64_read(&ioc->cur_period);
1044 last_period = atomic64_read(&iocg->active_period);
1045 atomic64_set(&iocg->active_period, cur_period);
1047 /* already activated or breaking leaf-only constraint? */
1048 for (i = iocg->level; i > 0; i--)
1049 if (!list_empty(&iocg->active_list))
1051 if (iocg->child_active_sum)
1055 * vtime may wrap when vrate is raised substantially due to
1056 * underestimated IO costs. Look at the period and ignore its
1057 * vtime if the iocg has been idle for too long. Also, cap the
1058 * budget it can start with to the margin.
1060 max_period_delta = DIV64_U64_ROUND_UP(VTIME_VALID_DUR, ioc->period_us);
1061 vtime = atomic64_read(&iocg->vtime);
1062 vmargin = ioc->margin_us * now->vrate;
1063 vmin = now->vnow - vmargin;
1065 if (last_period + max_period_delta < cur_period ||
1066 time_before64(vtime, vmin)) {
1067 atomic64_add(vmin - vtime, &iocg->vtime);
1068 atomic64_add(vmin - vtime, &iocg->done_vtime);
1073 * Activate, propagate weight and start period timer if not
1074 * running. Reset hweight_gen to avoid accidental match from
1077 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1078 list_add(&iocg->active_list, &ioc->active_iocgs);
1079 propagate_active_weight(iocg, iocg->weight,
1080 iocg->last_inuse ?: iocg->weight);
1082 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1083 last_period, cur_period, vtime);
1085 iocg->last_vtime = vtime;
1087 if (ioc->running == IOC_IDLE) {
1088 ioc->running = IOC_RUNNING;
1089 ioc_start_period(ioc, now);
1092 spin_unlock_irq(&ioc->lock);
1096 spin_unlock_irq(&ioc->lock);
1100 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1101 int flags, void *key)
1103 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1104 struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1105 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1107 ctx->vbudget -= cost;
1109 if (ctx->vbudget < 0)
1112 iocg_commit_bio(ctx->iocg, wait->bio, cost);
1115 * autoremove_wake_function() removes the wait entry only when it
1116 * actually changed the task state. We want the wait always
1117 * removed. Remove explicitly and use default_wake_function().
1119 list_del_init(&wq_entry->entry);
1120 wait->committed = true;
1122 default_wake_function(wq_entry, mode, flags, key);
1126 static void iocg_kick_waitq(struct ioc_gq *iocg, struct ioc_now *now)
1128 struct ioc *ioc = iocg->ioc;
1129 struct iocg_wake_ctx ctx = { .iocg = iocg };
1130 u64 margin_ns = (u64)(ioc->period_us *
1131 WAITQ_TIMER_MARGIN_PCT / 100) * NSEC_PER_USEC;
1132 u64 vshortage, expires, oexpires;
1134 lockdep_assert_held(&iocg->waitq.lock);
1137 * Wake up the ones which are due and see how much vtime we'll need
1140 current_hweight(iocg, NULL, &ctx.hw_inuse);
1141 ctx.vbudget = now->vnow - atomic64_read(&iocg->vtime);
1142 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1143 if (!waitqueue_active(&iocg->waitq))
1145 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1148 /* determine next wakeup, add a quarter margin to guarantee chunking */
1149 vshortage = -ctx.vbudget;
1150 expires = now->now_ns +
1151 DIV64_U64_ROUND_UP(vshortage, now->vrate) * NSEC_PER_USEC;
1152 expires += margin_ns / 4;
1154 /* if already active and close enough, don't bother */
1155 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1156 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1157 abs(oexpires - expires) <= margin_ns / 4)
1160 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1161 margin_ns / 4, HRTIMER_MODE_ABS);
1164 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1166 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1168 unsigned long flags;
1170 ioc_now(iocg->ioc, &now);
1172 spin_lock_irqsave(&iocg->waitq.lock, flags);
1173 iocg_kick_waitq(iocg, &now);
1174 spin_unlock_irqrestore(&iocg->waitq.lock, flags);
1176 return HRTIMER_NORESTART;
1179 static void iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now, u64 cost)
1181 struct ioc *ioc = iocg->ioc;
1182 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1183 u64 vtime = atomic64_read(&iocg->vtime);
1184 u64 vmargin = ioc->margin_us * now->vrate;
1185 u64 margin_ns = ioc->margin_us * NSEC_PER_USEC;
1186 u64 expires, oexpires;
1188 /* clear or maintain depending on the overage */
1189 if (time_before_eq64(vtime, now->vnow)) {
1190 blkcg_clear_delay(blkg);
1193 if (!atomic_read(&blkg->use_delay) &&
1194 time_before_eq64(vtime, now->vnow + vmargin))
1199 u64 cost_ns = DIV64_U64_ROUND_UP(cost * NSEC_PER_USEC,
1201 blkcg_add_delay(blkg, now->now_ns, cost_ns);
1203 blkcg_use_delay(blkg);
1205 expires = now->now_ns + DIV64_U64_ROUND_UP(vtime - now->vnow,
1206 now->vrate) * NSEC_PER_USEC;
1208 /* if already active and close enough, don't bother */
1209 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->delay_timer));
1210 if (hrtimer_is_queued(&iocg->delay_timer) &&
1211 abs(oexpires - expires) <= margin_ns / 4)
1214 hrtimer_start_range_ns(&iocg->delay_timer, ns_to_ktime(expires),
1215 margin_ns / 4, HRTIMER_MODE_ABS);
1218 static enum hrtimer_restart iocg_delay_timer_fn(struct hrtimer *timer)
1220 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, delay_timer);
1223 ioc_now(iocg->ioc, &now);
1224 iocg_kick_delay(iocg, &now, 0);
1226 return HRTIMER_NORESTART;
1229 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1231 u32 nr_met[2] = { };
1232 u32 nr_missed[2] = { };
1236 for_each_online_cpu(cpu) {
1237 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1238 u64 this_rq_wait_ns;
1240 for (rw = READ; rw <= WRITE; rw++) {
1241 u32 this_met = READ_ONCE(stat->missed[rw].nr_met);
1242 u32 this_missed = READ_ONCE(stat->missed[rw].nr_missed);
1244 nr_met[rw] += this_met - stat->missed[rw].last_met;
1245 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1246 stat->missed[rw].last_met = this_met;
1247 stat->missed[rw].last_missed = this_missed;
1250 this_rq_wait_ns = READ_ONCE(stat->rq_wait_ns);
1251 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1252 stat->last_rq_wait_ns = this_rq_wait_ns;
1255 for (rw = READ; rw <= WRITE; rw++) {
1256 if (nr_met[rw] + nr_missed[rw])
1258 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1259 nr_met[rw] + nr_missed[rw]);
1261 missed_ppm_ar[rw] = 0;
1264 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1265 ioc->period_us * NSEC_PER_USEC);
1268 /* was iocg idle this period? */
1269 static bool iocg_is_idle(struct ioc_gq *iocg)
1271 struct ioc *ioc = iocg->ioc;
1273 /* did something get issued this period? */
1274 if (atomic64_read(&iocg->active_period) ==
1275 atomic64_read(&ioc->cur_period))
1278 /* is something in flight? */
1279 if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
1285 /* returns usage with margin added if surplus is large enough */
1286 static u32 surplus_adjusted_hweight_inuse(u32 usage, u32 hw_inuse)
1289 usage = DIV_ROUND_UP(usage * SURPLUS_SCALE_PCT, 100);
1290 usage += SURPLUS_SCALE_ABS;
1292 /* don't bother if the surplus is too small */
1293 if (usage + SURPLUS_MIN_ADJ_DELTA > hw_inuse)
1299 static void ioc_timer_fn(struct timer_list *timer)
1301 struct ioc *ioc = container_of(timer, struct ioc, timer);
1302 struct ioc_gq *iocg, *tiocg;
1304 int nr_surpluses = 0, nr_shortages = 0, nr_lagging = 0;
1305 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
1306 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
1307 u32 missed_ppm[2], rq_wait_pct;
1311 /* how were the latencies during the period? */
1312 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
1314 /* take care of active iocgs */
1315 spin_lock_irq(&ioc->lock);
1319 period_vtime = now.vnow - ioc->period_at_vtime;
1320 if (WARN_ON_ONCE(!period_vtime)) {
1321 spin_unlock_irq(&ioc->lock);
1326 * Waiters determine the sleep durations based on the vrate they
1327 * saw at the time of sleep. If vrate has increased, some waiters
1328 * could be sleeping for too long. Wake up tardy waiters which
1329 * should have woken up in the last period and expire idle iocgs.
1331 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
1332 if (!waitqueue_active(&iocg->waitq) && !iocg_is_idle(iocg))
1335 spin_lock(&iocg->waitq.lock);
1337 if (waitqueue_active(&iocg->waitq)) {
1338 /* might be oversleeping vtime / hweight changes, kick */
1339 iocg_kick_waitq(iocg, &now);
1340 iocg_kick_delay(iocg, &now, 0);
1341 } else if (iocg_is_idle(iocg)) {
1342 /* no waiter and idle, deactivate */
1343 iocg->last_inuse = iocg->inuse;
1344 __propagate_active_weight(iocg, 0, 0);
1345 list_del_init(&iocg->active_list);
1348 spin_unlock(&iocg->waitq.lock);
1350 commit_active_weights(ioc);
1352 /* calc usages and see whether some weights need to be moved around */
1353 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
1354 u64 vdone, vtime, vusage, vmargin, vmin;
1355 u32 hw_active, hw_inuse, usage;
1358 * Collect unused and wind vtime closer to vnow to prevent
1359 * iocgs from accumulating a large amount of budget.
1361 vdone = atomic64_read(&iocg->done_vtime);
1362 vtime = atomic64_read(&iocg->vtime);
1363 current_hweight(iocg, &hw_active, &hw_inuse);
1366 * Latency QoS detection doesn't account for IOs which are
1367 * in-flight for longer than a period. Detect them by
1368 * comparing vdone against period start. If lagging behind
1369 * IOs from past periods, don't increase vrate.
1371 if (!atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
1372 time_after64(vtime, vdone) &&
1373 time_after64(vtime, now.vnow -
1374 MAX_LAGGING_PERIODS * period_vtime) &&
1375 time_before64(vdone, now.vnow - period_vtime))
1378 if (waitqueue_active(&iocg->waitq))
1379 vusage = now.vnow - iocg->last_vtime;
1380 else if (time_before64(iocg->last_vtime, vtime))
1381 vusage = vtime - iocg->last_vtime;
1385 iocg->last_vtime += vusage;
1387 * Factor in in-flight vtime into vusage to avoid
1388 * high-latency completions appearing as idle. This should
 * be done after the above ->last_vtime adjustment.
1391 vusage = max(vusage, vtime - vdone);
1393 /* calculate hweight based usage ratio and record */
1395 usage = DIV64_U64_ROUND_UP(vusage * hw_inuse,
1397 iocg->usage_idx = (iocg->usage_idx + 1) % NR_USAGE_SLOTS;
1398 iocg->usages[iocg->usage_idx] = usage;
1403 /* see whether there's surplus vtime */
1404 vmargin = ioc->margin_us * now.vrate;
1405 vmin = now.vnow - vmargin;
1407 iocg->has_surplus = false;
1409 if (!waitqueue_active(&iocg->waitq) &&
1410 time_before64(vtime, vmin)) {
1411 u64 delta = vmin - vtime;
1413 /* throw away surplus vtime */
1414 atomic64_add(delta, &iocg->vtime);
1415 atomic64_add(delta, &iocg->done_vtime);
1416 iocg->last_vtime += delta;
1417 /* if usage is sufficiently low, maybe it can donate */
1418 if (surplus_adjusted_hweight_inuse(usage, hw_inuse)) {
1419 iocg->has_surplus = true;
1422 } else if (hw_inuse < hw_active) {
1423 u32 new_hwi, new_inuse;
1425 /* was donating but might need to take back some */
1426 if (waitqueue_active(&iocg->waitq)) {
1427 new_hwi = hw_active;
1429 new_hwi = max(hw_inuse,
1430 usage * SURPLUS_SCALE_PCT / 100 +
1434 new_inuse = div64_u64((u64)iocg->inuse * new_hwi,
1436 new_inuse = clamp_t(u32, new_inuse, 1, iocg->active);
1438 if (new_inuse > iocg->inuse) {
1439 TRACE_IOCG_PATH(inuse_takeback, iocg, &now,
1440 iocg->inuse, new_inuse,
1442 __propagate_active_weight(iocg, iocg->weight,
/* genuinely out of vtime */
1451 if (!nr_shortages || !nr_surpluses)
1452 goto skip_surplus_transfers;
1454 /* there are both shortages and surpluses, transfer surpluses */
1455 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
1456 u32 usage, hw_active, hw_inuse, new_hwi, new_inuse;
1459 if (!iocg->has_surplus)
1462 /* base the decision on max historical usage */
1463 for (i = 0, usage = 0; i < NR_USAGE_SLOTS; i++) {
1464 if (iocg->usages[i]) {
1465 usage = max(usage, iocg->usages[i]);
1469 if (nr_valid < MIN_VALID_USAGES)
1472 current_hweight(iocg, &hw_active, &hw_inuse);
1473 new_hwi = surplus_adjusted_hweight_inuse(usage, hw_inuse);
1477 new_inuse = DIV64_U64_ROUND_UP((u64)iocg->inuse * new_hwi,
1479 if (new_inuse < iocg->inuse) {
1480 TRACE_IOCG_PATH(inuse_giveaway, iocg, &now,
1481 iocg->inuse, new_inuse,
1483 __propagate_active_weight(iocg, iocg->weight, new_inuse);
1486 skip_surplus_transfers:
1487 commit_active_weights(ioc);
1490 * If q is getting clogged or we're missing too much, we're issuing
1491 * too much IO and should lower vtime rate. If we're not missing
1492 * and experiencing shortages but not surpluses, we're too stingy
1493 * and should increase vtime rate.
1495 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
1496 missed_ppm[READ] > ppm_rthr ||
1497 missed_ppm[WRITE] > ppm_wthr) {
1498 ioc->busy_level = max(ioc->busy_level, 0);
1500 } else if (nr_lagging) {
1501 ioc->busy_level = max(ioc->busy_level, 0);
1502 } else if (nr_shortages && !nr_surpluses &&
1503 rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
1504 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
1505 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
1506 ioc->busy_level = min(ioc->busy_level, 0);
1509 ioc->busy_level = 0;
1512 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
1514 if (ioc->busy_level) {
1515 u64 vrate = atomic64_read(&ioc->vtime_rate);
1516 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
1518 /* rq_wait signal is always reliable, ignore user vrate_min */
1519 if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
1520 vrate_min = VRATE_MIN;
1523 * If vrate is out of bounds, apply clamp gradually as the
1524 * bounds can change abruptly. Otherwise, apply busy_level
1527 if (vrate < vrate_min) {
1528 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
1530 vrate = min(vrate, vrate_min);
1531 } else if (vrate > vrate_max) {
1532 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
1534 vrate = max(vrate, vrate_max);
1536 int idx = min_t(int, abs(ioc->busy_level),
1537 ARRAY_SIZE(vrate_adj_pct) - 1);
1538 u32 adj_pct = vrate_adj_pct[idx];
1540 if (ioc->busy_level > 0)
1541 adj_pct = 100 - adj_pct;
1543 adj_pct = 100 + adj_pct;
1545 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1546 vrate_min, vrate_max);
1549 trace_iocost_ioc_vrate_adj(ioc, vrate, &missed_ppm, rq_wait_pct,
1550 nr_lagging, nr_shortages,
1553 atomic64_set(&ioc->vtime_rate, vrate);
1554 ioc->inuse_margin_vtime = DIV64_U64_ROUND_UP(
1555 ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
1558 ioc_refresh_params(ioc, false);
1561 * This period is done. Move onto the next one. If nothing's
1562 * going on with the device, stop the timer.
1564 atomic64_inc(&ioc->cur_period);
1566 if (ioc->running != IOC_STOP) {
1567 if (!list_empty(&ioc->active_iocgs)) {
1568 ioc_start_period(ioc, &now);
1570 ioc->busy_level = 0;
1571 ioc->running = IOC_IDLE;
1575 spin_unlock_irq(&ioc->lock);
1578 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
1579 bool is_merge, u64 *costp)
1581 struct ioc *ioc = iocg->ioc;
1582 u64 coef_seqio, coef_randio, coef_page;
1583 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
1587 switch (bio_op(bio)) {
1589 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
1590 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
1591 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
1594 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
1595 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
1596 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
1603 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
1604 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
1608 if (seek_pages > LCOEF_RANDIO_PAGES) {
1609 cost += coef_randio;
1614 cost += pages * coef_page;
1619 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
1623 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
1627 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
1629 struct blkcg_gq *blkg = bio->bi_blkg;
1630 struct ioc *ioc = rqos_to_ioc(rqos);
1631 struct ioc_gq *iocg = blkg_to_iocg(blkg);
1633 struct iocg_wait wait;
1634 u32 hw_active, hw_inuse;
1635 u64 abs_cost, cost, vtime;
1637 /* bypass IOs if disabled or for root cgroup */
1638 if (!ioc->enabled || !iocg->level)
1641 /* always activate so that even 0 cost IOs get protected to some level */
1642 if (!iocg_activate(iocg, &now))
1645 /* calculate the absolute vtime cost */
1646 abs_cost = calc_vtime_cost(bio, iocg, false);
1650 iocg->cursor = bio_end_sector(bio);
1652 vtime = atomic64_read(&iocg->vtime);
1653 current_hweight(iocg, &hw_active, &hw_inuse);
1655 if (hw_inuse < hw_active &&
1656 time_after_eq64(vtime + ioc->inuse_margin_vtime, now.vnow)) {
1657 TRACE_IOCG_PATH(inuse_reset, iocg, &now,
1658 iocg->inuse, iocg->weight, hw_inuse, hw_active);
1659 spin_lock_irq(&ioc->lock);
1660 propagate_active_weight(iocg, iocg->weight, iocg->weight);
1661 spin_unlock_irq(&ioc->lock);
1662 current_hweight(iocg, &hw_active, &hw_inuse);
1665 cost = abs_cost_to_cost(abs_cost, hw_inuse);
1668 * If no one's waiting and within budget, issue right away. The
1669 * tests are racy but the races aren't systemic - we only miss once
1670 * in a while which is fine.
1672 if (!waitqueue_active(&iocg->waitq) &&
1673 time_before_eq64(vtime + cost, now.vnow)) {
1674 iocg_commit_bio(iocg, bio, cost);
1678 if (bio_issue_as_root_blkg(bio) || fatal_signal_pending(current)) {
1679 iocg_commit_bio(iocg, bio, cost);
1680 iocg_kick_delay(iocg, &now, cost);
1685 * Append self to the waitq and schedule the wakeup timer if we're
1686 * the first waiter. The timer duration is calculated based on the
1687 * current vrate. vtime and hweight changes can make it too short
1688 * or too long. Each wait entry records the absolute cost it's
1689 * waiting for to allow re-evaluation using a custom wait entry.
1691 * If too short, the timer simply reschedules itself. If too long,
1692 * the period timer will notice and trigger wakeups.
1694 * All waiters are on iocg->waitq and the wait states are
1695 * synchronized using waitq.lock.
1697 spin_lock_irq(&iocg->waitq.lock);
1700 * We activated above but w/o any synchronization. Deactivation is
1701 * synchronized with waitq.lock and we won't get deactivated as
1702 * long as we're waiting, so we're good if we're activated here.
1703 * In the unlikely case that we are deactivated, just issue the IO.
1705 if (unlikely(list_empty(&iocg->active_list))) {
1706 spin_unlock_irq(&iocg->waitq.lock);
1707 iocg_commit_bio(iocg, bio, cost);
1711 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
1712 wait.wait.private = current;
1714 wait.abs_cost = abs_cost;
1715 wait.committed = false; /* will be set true by waker */
1717 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
1718 iocg_kick_waitq(iocg, &now);
1720 spin_unlock_irq(&iocg->waitq.lock);
1723 set_current_state(TASK_UNINTERRUPTIBLE);
1729 /* waker already committed us, proceed */
1730 finish_wait(&iocg->waitq, &wait.wait);
1733 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
1736 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
1737 sector_t bio_end = bio_end_sector(bio);
1741 /* add iff the existing request has cost assigned */
1742 if (!rq->bio || !rq->bio->bi_iocost_cost)
1745 abs_cost = calc_vtime_cost(bio, iocg, true);
1749 /* update cursor if backmerging into the request at the cursor */
1750 if (blk_rq_pos(rq) < bio_end &&
1751 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
1752 iocg->cursor = bio_end;
1754 current_hweight(iocg, NULL, &hw_inuse);
1755 cost = div64_u64(abs_cost * HWEIGHT_WHOLE, hw_inuse);
1756 bio->bi_iocost_cost = cost;
1758 atomic64_add(cost, &iocg->vtime);
1761 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
1763 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
1765 if (iocg && bio->bi_iocost_cost)
1766 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
1769 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
1771 struct ioc *ioc = rqos_to_ioc(rqos);
1772 u64 on_q_ns, rq_wait_ns;
1775 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
1778 switch (req_op(rq) & REQ_OP_MASK) {
1791 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
1792 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
1794 if (on_q_ns <= ioc->params.qos[pidx] * NSEC_PER_USEC)
1795 this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_met);
1797 this_cpu_inc(ioc->pcpu_stat->missed[rw].nr_missed);
1799 this_cpu_add(ioc->pcpu_stat->rq_wait_ns, rq_wait_ns);
1802 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
1804 struct ioc *ioc = rqos_to_ioc(rqos);
1806 spin_lock_irq(&ioc->lock);
1807 ioc_refresh_params(ioc, false);
1808 spin_unlock_irq(&ioc->lock);
1811 static void ioc_rqos_exit(struct rq_qos *rqos)
1813 struct ioc *ioc = rqos_to_ioc(rqos);
1815 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
1817 spin_lock_irq(&ioc->lock);
1818 ioc->running = IOC_STOP;
1819 spin_unlock_irq(&ioc->lock);
1821 del_timer_sync(&ioc->timer);
1822 free_percpu(ioc->pcpu_stat);
1826 static struct rq_qos_ops ioc_rqos_ops = {
1827 .throttle = ioc_rqos_throttle,
1828 .merge = ioc_rqos_merge,
1829 .done_bio = ioc_rqos_done_bio,
1830 .done = ioc_rqos_done,
1831 .queue_depth_changed = ioc_rqos_queue_depth_changed,
1832 .exit = ioc_rqos_exit,
1835 static int blk_iocost_init(struct request_queue *q)
1838 struct rq_qos *rqos;
1841 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1845 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
1846 if (!ioc->pcpu_stat) {
1852 rqos->id = RQ_QOS_COST;
1853 rqos->ops = &ioc_rqos_ops;
1856 spin_lock_init(&ioc->lock);
1857 timer_setup(&ioc->timer, ioc_timer_fn, 0);
1858 INIT_LIST_HEAD(&ioc->active_iocgs);
1860 ioc->running = IOC_IDLE;
1861 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
1862 seqcount_init(&ioc->period_seqcount);
1863 ioc->period_at = ktime_to_us(ktime_get());
1864 atomic64_set(&ioc->cur_period, 0);
1865 atomic_set(&ioc->hweight_gen, 0);
1867 spin_lock_irq(&ioc->lock);
1868 ioc->autop_idx = AUTOP_INVALID;
1869 ioc_refresh_params(ioc, true);
1870 spin_unlock_irq(&ioc->lock);
1872 rq_qos_add(q, rqos);
1873 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
1875 rq_qos_del(q, rqos);
1882 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
1884 struct ioc_cgrp *iocc;
iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
if (!iocc)
	return NULL;

iocc->dfl_weight = CGROUP_WEIGHT_DFL;
1892 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
1894 kfree(container_of(cpd, struct ioc_cgrp, cpd));
1897 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
1898 struct blkcg *blkcg)
1900 int levels = blkcg->css.cgroup->level + 1;
1901 struct ioc_gq *iocg;
1903 iocg = kzalloc_node(sizeof(*iocg) + levels * sizeof(iocg->ancestors[0]),
1911 static void ioc_pd_init(struct blkg_policy_data *pd)
1913 struct ioc_gq *iocg = pd_to_iocg(pd);
1914 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
1915 struct ioc *ioc = q_to_ioc(blkg->q);
1917 struct blkcg_gq *tblkg;
1918 unsigned long flags;
1923 atomic64_set(&iocg->vtime, now.vnow);
1924 atomic64_set(&iocg->done_vtime, now.vnow);
1925 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
1926 INIT_LIST_HEAD(&iocg->active_list);
1927 iocg->hweight_active = HWEIGHT_WHOLE;
1928 iocg->hweight_inuse = HWEIGHT_WHOLE;
1930 init_waitqueue_head(&iocg->waitq);
1931 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1932 iocg->waitq_timer.function = iocg_waitq_timer_fn;
1933 hrtimer_init(&iocg->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1934 iocg->delay_timer.function = iocg_delay_timer_fn;
1936 iocg->level = blkg->blkcg->css.cgroup->level;
1938 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
1939 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
1940 iocg->ancestors[tiocg->level] = tiocg;
1943 spin_lock_irqsave(&ioc->lock, flags);
1944 weight_updated(iocg);
1945 spin_unlock_irqrestore(&ioc->lock, flags);
1948 static void ioc_pd_free(struct blkg_policy_data *pd)
1950 struct ioc_gq *iocg = pd_to_iocg(pd);
1951 struct ioc *ioc = iocg->ioc;
1954 hrtimer_cancel(&iocg->waitq_timer);
1955 hrtimer_cancel(&iocg->delay_timer);
1957 spin_lock(&ioc->lock);
1958 if (!list_empty(&iocg->active_list)) {
1959 propagate_active_weight(iocg, 0, 0);
1960 list_del_init(&iocg->active_list);
1962 spin_unlock(&ioc->lock);
1967 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
1970 const char *dname = blkg_dev_name(pd->blkg);
1971 struct ioc_gq *iocg = pd_to_iocg(pd);
1973 if (dname && iocg->cfg_weight)
1974 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight);
1979 static int ioc_weight_show(struct seq_file *sf, void *v)
1981 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
1982 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
1984 seq_printf(sf, "default %u\n", iocc->dfl_weight);
1985 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
1986 &blkcg_policy_iocost, seq_cft(sf)->private, false);
1990 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
1991 size_t nbytes, loff_t off)
1993 struct blkcg *blkcg = css_to_blkcg(of_css(of));
1994 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
1995 struct blkg_conf_ctx ctx;
1996 struct ioc_gq *iocg;
2000 if (!strchr(buf, ':')) {
2001 struct blkcg_gq *blkg;
2003 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
2006 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
2009 spin_lock(&blkcg->lock);
2010 iocc->dfl_weight = v;
2011 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
2012 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2015 spin_lock_irq(&iocg->ioc->lock);
2016 weight_updated(iocg);
2017 spin_unlock_irq(&iocg->ioc->lock);
2020 spin_unlock(&blkcg->lock);
2025 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
2029 iocg = blkg_to_iocg(ctx.blkg);
2031 if (!strncmp(ctx.body, "default", 7)) {
2034 if (!sscanf(ctx.body, "%u", &v))
2036 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
2040 spin_lock_irq(&iocg->ioc->lock);
2041 iocg->cfg_weight = v;
2042 weight_updated(iocg);
2043 spin_unlock_irq(&iocg->ioc->lock);
2045 blkg_conf_finish(&ctx);
2049 blkg_conf_finish(&ctx);
2053 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
2056 const char *dname = blkg_dev_name(pd->blkg);
2057 struct ioc *ioc = pd_to_iocg(pd)->ioc;
2062 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
2063 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
2064 ioc->params.qos[QOS_RPPM] / 10000,
2065 ioc->params.qos[QOS_RPPM] % 10000 / 100,
2066 ioc->params.qos[QOS_RLAT],
2067 ioc->params.qos[QOS_WPPM] / 10000,
2068 ioc->params.qos[QOS_WPPM] % 10000 / 100,
2069 ioc->params.qos[QOS_WLAT],
2070 ioc->params.qos[QOS_MIN] / 10000,
2071 ioc->params.qos[QOS_MIN] % 10000 / 100,
2072 ioc->params.qos[QOS_MAX] / 10000,
2073 ioc->params.qos[QOS_MAX] % 10000 / 100);
2077 static int ioc_qos_show(struct seq_file *sf, void *v)
2079 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2081 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
2082 &blkcg_policy_iocost, seq_cft(sf)->private, false);
2086 static const match_table_t qos_ctrl_tokens = {
2087 { QOS_ENABLE, "enable=%u" },
2088 { QOS_CTRL, "ctrl=%s" },
2089 { NR_QOS_CTRL_PARAMS, NULL },
2092 static const match_table_t qos_tokens = {
2093 { QOS_RPPM, "rpct=%s" },
2094 { QOS_RLAT, "rlat=%u" },
2095 { QOS_WPPM, "wpct=%s" },
2096 { QOS_WLAT, "wlat=%u" },
2097 { QOS_MIN, "min=%s" },
2098 { QOS_MAX, "max=%s" },
2099 { NR_QOS_PARAMS, NULL },
2102 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
2103 size_t nbytes, loff_t off)
2105 struct gendisk *disk;
2107 u32 qos[NR_QOS_PARAMS];
2112 disk = blkcg_conf_get_disk(&input);
2114 return PTR_ERR(disk);
2116 ioc = q_to_ioc(disk->queue);
2118 ret = blk_iocost_init(disk->queue);
2121 ioc = q_to_ioc(disk->queue);
2124 spin_lock_irq(&ioc->lock);
2125 memcpy(qos, ioc->params.qos, sizeof(qos));
2126 enable = ioc->enabled;
2127 user = ioc->user_qos_params;
2128 spin_unlock_irq(&ioc->lock);
2130 while ((p = strsep(&input, " \t\n"))) {
2131 substring_t args[MAX_OPT_ARGS];
2139 switch (match_token(p, qos_ctrl_tokens, args)) {
2141 match_u64(&args[0], &v);
2145 match_strlcpy(buf, &args[0], sizeof(buf));
2146 if (!strcmp(buf, "auto"))
2148 else if (!strcmp(buf, "user"))
2155 tok = match_token(p, qos_tokens, args);
2159 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
2162 if (cgroup_parse_float(buf, 2, &v))
2164 if (v < 0 || v > 10000)
2170 if (match_u64(&args[0], &v))
2176 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
2179 if (cgroup_parse_float(buf, 2, &v))
2183 qos[tok] = clamp_t(s64, v * 100,
2184 VRATE_MIN_PPM, VRATE_MAX_PPM);
2192 if (qos[QOS_MIN] > qos[QOS_MAX])
2195 spin_lock_irq(&ioc->lock);
2198 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
2199 ioc->enabled = true;
2201 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
2202 ioc->enabled = false;
2206 memcpy(ioc->params.qos, qos, sizeof(qos));
2207 ioc->user_qos_params = true;
2209 ioc->user_qos_params = false;
2212 ioc_refresh_params(ioc, true);
2213 spin_unlock_irq(&ioc->lock);
2215 put_disk_and_module(disk);
2220 put_disk_and_module(disk);
2224 static u64 ioc_cost_model_prfill(struct seq_file *sf,
2225 struct blkg_policy_data *pd, int off)
2227 const char *dname = blkg_dev_name(pd->blkg);
2228 struct ioc *ioc = pd_to_iocg(pd)->ioc;
2229 u64 *u = ioc->params.i_lcoefs;
2234 seq_printf(sf, "%s ctrl=%s model=linear "
2235 "rbps=%llu rseqiops=%llu rrandiops=%llu "
2236 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
2237 dname, ioc->user_cost_model ? "user" : "auto",
2238 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
2239 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
2243 static int ioc_cost_model_show(struct seq_file *sf, void *v)
2245 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
2247 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
2248 &blkcg_policy_iocost, seq_cft(sf)->private, false);
2252 static const match_table_t cost_ctrl_tokens = {
2253 { COST_CTRL, "ctrl=%s" },
2254 { COST_MODEL, "model=%s" },
2255 { NR_COST_CTRL_PARAMS, NULL },
2258 static const match_table_t i_lcoef_tokens = {
2259 { I_LCOEF_RBPS, "rbps=%u" },
2260 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
2261 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
2262 { I_LCOEF_WBPS, "wbps=%u" },
2263 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
2264 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
2265 { NR_I_LCOEFS, NULL },
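/*
 * For reference, a full io.cost.model line (using the first builtin
 * parameter set above purely as an illustration) would look like:
 *
 *	8:16 ctrl=user model=linear rbps=174019176 rseqiops=41708
 *	     rrandiops=370 wbps=178075866 wseqiops=42705 wrandiops=378
 *
 * with the MAJ:MIN and coefficients obviously depending on the device.
 */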
2268 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
2269 size_t nbytes, loff_t off)
2271 struct gendisk *disk;
2278 disk = blkcg_conf_get_disk(&input);
2280 return PTR_ERR(disk);
2282 ioc = q_to_ioc(disk->queue);
2284 ret = blk_iocost_init(disk->queue);
2287 ioc = q_to_ioc(disk->queue);
2290 spin_lock_irq(&ioc->lock);
2291 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
2292 user = ioc->user_cost_model;
2293 spin_unlock_irq(&ioc->lock);
2295 while ((p = strsep(&input, " \t\n"))) {
2296 substring_t args[MAX_OPT_ARGS];
2304 switch (match_token(p, cost_ctrl_tokens, args)) {
2306 match_strlcpy(buf, &args[0], sizeof(buf));
2307 if (!strcmp(buf, "auto"))
2309 else if (!strcmp(buf, "user"))
2315 match_strlcpy(buf, &args[0], sizeof(buf));
2316 if (strcmp(buf, "linear"))
2321 tok = match_token(p, i_lcoef_tokens, args);
2322 if (tok == NR_I_LCOEFS)
2324 if (match_u64(&args[0], &v))
2330 spin_lock_irq(&ioc->lock);
2332 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
2333 ioc->user_cost_model = true;
2335 ioc->user_cost_model = false;
2337 ioc_refresh_params(ioc, true);
2338 spin_unlock_irq(&ioc->lock);
2340 put_disk_and_module(disk);
2346 put_disk_and_module(disk);
2350 static struct cftype ioc_files[] = {
2353 .flags = CFTYPE_NOT_ON_ROOT,
2354 .seq_show = ioc_weight_show,
2355 .write = ioc_weight_write,
2359 .flags = CFTYPE_ONLY_ON_ROOT,
2360 .seq_show = ioc_qos_show,
2361 .write = ioc_qos_write,
2364 .name = "cost.model",
2365 .flags = CFTYPE_ONLY_ON_ROOT,
2366 .seq_show = ioc_cost_model_show,
2367 .write = ioc_cost_model_write,
2372 static struct blkcg_policy blkcg_policy_iocost = {
2373 .dfl_cftypes = ioc_files,
2374 .cpd_alloc_fn = ioc_cpd_alloc,
2375 .cpd_free_fn = ioc_cpd_free,
2376 .pd_alloc_fn = ioc_pd_alloc,
2377 .pd_init_fn = ioc_pd_init,
2378 .pd_free_fn = ioc_pd_free,
2381 static int __init ioc_init(void)
2383 return blkcg_policy_register(&blkcg_policy_iocost);
2386 static void __exit ioc_exit(void)
2388 return blkcg_policy_unregister(&blkcg_policy_iocost);
2391 module_init(ioc_init);
2392 module_exit(ioc_exit);