1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * IO cost model based controller.
4 *
5 * Copyright (C) 2019 Tejun Heo <tj@kernel.org>
6 * Copyright (C) 2019 Andy Newell <newella@fb.com>
7 * Copyright (C) 2019 Facebook
8 *
9 * One challenge of controlling IO resources is the lack of trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough
12 * approximations.
13 *
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of crutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
21 *
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
28 * distribution.
29 *
30 * 1. IO Cost Model
31 *
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
36 *
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide variety of devices well enough. Default
42 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
45 *
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
48 *
49 * 2. Control Strategy
50 *
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
53 *
54 * 2-1. Vtime Distribution
55 *
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
59 *
60 * root
61 * / \
62 * A (w:100) B (w:300)
63 * / \
64 * A0 (w:100) A1 (w:100)
65 *
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally split the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
71 * up to 1 (WEIGHT_ONE).
72 *
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
77 *
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO if doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
83 *
84 * 2-2. Vrate Adjustment
85 *
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
90 *
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should
95 * generally speed up.
96 *
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
101 *
102 * Device busyness is determined using two criteria - rq wait and
103 * completion latencies.
104 *
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default
111 * busy signal.
112 *
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, solely depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
118 *
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth lossage. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
124 *
125 * 2-3. Work Conservation
126 *
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay
134 * for IO control.
135 *
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
141 *
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
145 *
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, working out who can
149 * donate and how much should be taken back requires hweight propagations
150 * anyway, making it easier to implement and understand as a separate
151 * mechanism.
152 *
153 * 3. Monitoring
154 *
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The output looks like the following.
159 *
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
164 *
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - del_ms : Deferred issuer delay induction level and duration
172 * - usages : Usage history
173 */
174
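/*
 * Worked example of the scheme above, using the sample hierarchy from
 * section 2-1 (purely illustrative):
 *
 * - With B active, A0's flattened share is 100/(100+300) * 100/(100+100)
 *   = 12.5%, i.e. 0.125 * WEIGHT_ONE as an hweight.
 * - A0's vtime then advances at 100/12.5 = 8x the device vtime, so an IO
 *   estimated at 10ms of device time consumes 80ms of A0's vtime budget.
 * - If the controller lowers vrate to 75%, all active cgroups combined can
 *   issue only 750ms worth of estimated device time per wall-clock second.
 */
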
175#include <linux/kernel.h>
176#include <linux/module.h>
177#include <linux/timer.h>
178#include <linux/time64.h>
179#include <linux/parser.h>
180#include <linux/sched/signal.h>
181#include <asm/local.h>
182#include <asm/local64.h>
183#include "blk-rq-qos.h"
184#include "blk-stat.h"
185#include "blk-wbt.h"
 186#include "blk-cgroup.h"
187
188#ifdef CONFIG_TRACEPOINTS
189
190/* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191#define TRACE_IOCG_PATH_LEN 1024
192static DEFINE_SPINLOCK(trace_iocg_path_lock);
193static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
194
195#define TRACE_IOCG_PATH(type, iocg, ...) \
196 do { \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
203 ##__VA_ARGS__); \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
205 } \
206 } while (0)
207
 208#else /* CONFIG_TRACEPOINTS */
 209#define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
 210#endif /* CONFIG_TRACEPOINTS */
211
212enum {
213 MILLION = 1000000,
214
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
218
219 /*
 220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
223 */
 224 MARGIN_MIN_PCT = 10,
225 MARGIN_LOW_PCT = 20,
226 MARGIN_TARGET_PCT = 50,
 227
228 INUSE_ADJ_STEP_PCT = 25,
229
230 /* Have some play in timer operations */
231 TIMER_SLACK_PCT = 1,
 232
 233 /* 1/64k is granular enough and can easily be handled w/ u32 */
 234 WEIGHT_ONE = 1 << 16,
 235};
 236
 237enum {
238 /*
239 * As vtime is used to calculate the cost of each IO, it needs to
240 * be fairly high precision. For example, it should be able to
241 * represent the cost of a single page worth of discard with
 242 * sufficient accuracy. At the same time, it should be able to
243 * represent reasonably long enough durations to be useful and
244 * convenient during operation.
245 *
246 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
247 * granularity and days of wrap-around time even at extreme vrates.
248 */
249 VTIME_PER_SEC_SHIFT = 37,
250 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
251 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
 252 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
253
254 /* bound vrate adjustments within two orders of magnitude */
255 VRATE_MIN_PPM = 10000, /* 1% */
256 VRATE_MAX_PPM = 100000000, /* 10000% */
257
258 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
259 VRATE_CLAMP_ADJ_PCT = 4,
260
261 /* switch iff the conditions are met for longer than this */
262 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
263};
264
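/*
 * Unit sanity check (illustrative): VTIME_PER_SEC is 2^37 ~= 1.4e11, so
 * VTIME_PER_USEC is ~137439 and VTIME_PER_NSEC is ~137.  An IO whose model
 * cost is 10ms therefore adds about 10000 * VTIME_PER_USEC ~= 1.4e9 to the
 * issuing cgroup's vtime, and VRATE_MIN (1%) works out to
 * VTIME_PER_USEC / 100.
 */
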
265enum {
266 /* if IOs end up waiting for requests, issue less */
267 RQ_WAIT_BUSY_PCT = 5,
268
 269 /* unbusy hysteresis */
270 UNBUSY_THR_PCT = 75,
271
272 /*
273 * The effect of delay is indirect and non-linear and a huge amount of
274 * future debt can accumulate abruptly while unthrottled. Linearly scale
275 * up delay as debt is going up and then let it decay exponentially.
276 * This gives us quick ramp ups while delay is accumulating and long
 277 * tails which can help reduce the frequency of debt explosions on
278 * unthrottle. The parameters are experimentally determined.
279 *
280 * The delay mechanism provides adequate protection and behavior in many
281 * cases. However, this is far from ideal and falls shorts on both
282 * fronts. The debtors are often throttled too harshly costing a
283 * significant level of fairness and possibly total work while the
284 * protection against their impacts on the system can be choppy and
285 * unreliable.
286 *
287 * The shortcoming primarily stems from the fact that, unlike for page
288 * cache, the kernel doesn't have well-defined back-pressure propagation
289 * mechanism and policies for anonymous memory. Fully addressing this
290 * issue will likely require substantial improvements in the area.
291 */
292 MIN_DELAY_THR_PCT = 500,
293 MAX_DELAY_THR_PCT = 25000,
294 MIN_DELAY = 250,
295 MAX_DELAY = 250 * USEC_PER_MSEC,
296
297 /* halve debts if avg usage over 100ms is under 50% */
298 DFGV_USAGE_PCT = 50,
299 DFGV_PERIOD = 100 * USEC_PER_MSEC,
 300
301 /* don't let cmds which take a very long time pin lagging for too long */
302 MAX_LAGGING_PERIODS = 10,
303
304 /*
 305 * Count IO size in 4k pages. The 12-bit shift helps keep
306 * size-proportional components of cost calculation in closer
307 * numbers of digits to per-IO cost components.
308 */
309 IOC_PAGE_SHIFT = 12,
310 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
311 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
312
313 /* if apart further than 16M, consider randio for linear model */
314 LCOEF_RANDIO_PAGES = 4096,
315};
316
317enum ioc_running {
318 IOC_IDLE,
319 IOC_RUNNING,
320 IOC_STOP,
321};
322
323/* io.cost.qos controls including per-dev enable of the whole controller */
324enum {
325 QOS_ENABLE,
326 QOS_CTRL,
327 NR_QOS_CTRL_PARAMS,
328};
329
330/* io.cost.qos params */
331enum {
332 QOS_RPPM,
333 QOS_RLAT,
334 QOS_WPPM,
335 QOS_WLAT,
336 QOS_MIN,
337 QOS_MAX,
338 NR_QOS_PARAMS,
339};
340
341/* io.cost.model controls */
342enum {
343 COST_CTRL,
344 COST_MODEL,
345 NR_COST_CTRL_PARAMS,
346};
347
348/* builtin linear cost model coefficients */
349enum {
350 I_LCOEF_RBPS,
351 I_LCOEF_RSEQIOPS,
352 I_LCOEF_RRANDIOPS,
353 I_LCOEF_WBPS,
354 I_LCOEF_WSEQIOPS,
355 I_LCOEF_WRANDIOPS,
356 NR_I_LCOEFS,
357};
358
359enum {
360 LCOEF_RPAGE,
361 LCOEF_RSEQIO,
362 LCOEF_RRANDIO,
363 LCOEF_WPAGE,
364 LCOEF_WSEQIO,
365 LCOEF_WRANDIO,
366 NR_LCOEFS,
367};
368
369enum {
370 AUTOP_INVALID,
371 AUTOP_HDD,
372 AUTOP_SSD_QD1,
373 AUTOP_SSD_DFL,
374 AUTOP_SSD_FAST,
375};
376
377struct ioc_params {
378 u32 qos[NR_QOS_PARAMS];
379 u64 i_lcoefs[NR_I_LCOEFS];
380 u64 lcoefs[NR_LCOEFS];
381 u32 too_fast_vrate_pct;
382 u32 too_slow_vrate_pct;
383};
384
385struct ioc_margins {
386 s64 min;
387 s64 low;
388 s64 target;
389};
390
 391struct ioc_missed {
392 local_t nr_met;
393 local_t nr_missed;
394 u32 last_met;
395 u32 last_missed;
396};
397
398struct ioc_pcpu_stat {
399 struct ioc_missed missed[2];
400
 401 local64_t rq_wait_ns;
402 u64 last_rq_wait_ns;
403};
404
405/* per device */
406struct ioc {
407 struct rq_qos rqos;
408
409 bool enabled;
410
411 struct ioc_params params;
 412 struct ioc_margins margins;
 413 u32 period_us;
 414 u32 timer_slack_ns;
415 u64 vrate_min;
416 u64 vrate_max;
417
418 spinlock_t lock;
419 struct timer_list timer;
420 struct list_head active_iocgs; /* active cgroups */
421 struct ioc_pcpu_stat __percpu *pcpu_stat;
422
423 enum ioc_running running;
424 atomic64_t vtime_rate;
425 u64 vtime_base_rate;
426 s64 vtime_err;
 427
 428 seqcount_spinlock_t period_seqcount;
 429 u64 period_at; /* wallclock starttime */
430 u64 period_at_vtime; /* vtime starttime */
431
432 atomic64_t cur_period; /* inc'd each period */
433 int busy_level; /* saturation history */
434
435 bool weights_updated;
436 atomic_t hweight_gen; /* for lazy hweights */
437
 438 /* debt forgiveness */
439 u64 dfgv_period_at;
440 u64 dfgv_period_rem;
441 u64 dfgv_usage_us_sum;
 442
443 u64 autop_too_fast_at;
444 u64 autop_too_slow_at;
445 int autop_idx;
446 bool user_qos_params:1;
447 bool user_cost_model:1;
448};
449
450struct iocg_pcpu_stat {
451 local64_t abs_vusage;
452};
453
454struct iocg_stat {
455 u64 usage_us;
456 u64 wait_us;
457 u64 indebt_us;
458 u64 indelay_us;
459};
460
461/* per device-cgroup pair */
462struct ioc_gq {
463 struct blkg_policy_data pd;
464 struct ioc *ioc;
465
466 /*
 467 * An iocg can get its weight from two sources - an explicit
468 * per-device-cgroup configuration or the default weight of the
469 * cgroup. `cfg_weight` is the explicit per-device-cgroup
 470 * configuration. `weight` is the effective weight considering both
471 * sources.
472 *
473 * When an idle cgroup becomes active its `active` goes from 0 to
474 * `weight`. `inuse` is the surplus adjusted active weight.
475 * `active` and `inuse` are used to calculate `hweight_active` and
476 * `hweight_inuse`.
477 *
478 * `last_inuse` remembers `inuse` while an iocg is idle to persist
479 * surplus adjustments.
480 *
481 * `inuse` may be adjusted dynamically during period. `saved_*` are used
482 * to determine and track adjustments.
483 */
484 u32 cfg_weight;
485 u32 weight;
486 u32 active;
487 u32 inuse;
 488
 489 u32 last_inuse;
 490 s64 saved_margin;
491
492 sector_t cursor; /* to detect randio */
493
494 /*
495 * `vtime` is this iocg's vtime cursor which progresses as IOs are
496 * issued. If lagging behind device vtime, the delta represents
 497 * the currently available IO budget. If running ahead, the
498 * overage.
499 *
500 * `vtime_done` is the same but progressed on completion rather
501 * than issue. The delta behind `vtime` represents the cost of
502 * currently in-flight IOs.
503 */
504 atomic64_t vtime;
505 atomic64_t done_vtime;
 506 u64 abs_vdebt;
 507
508 /* current delay in effect and when it started */
509 u64 delay;
510 u64 delay_at;
511
512 /*
513 * The period this iocg was last active in. Used for deactivation
514 * and invalidating `vtime`.
515 */
516 atomic64_t active_period;
517 struct list_head active_list;
518
 519 /* see __propagate_weights() and current_hweight() for details */
520 u64 child_active_sum;
521 u64 child_inuse_sum;
 522 u64 child_adjusted_sum;
523 int hweight_gen;
524 u32 hweight_active;
525 u32 hweight_inuse;
 526 u32 hweight_donating;
 527 u32 hweight_after_donation;
 528
 529 struct list_head walk_list;
 530 struct list_head surplus_list;
 531
532 struct wait_queue_head waitq;
533 struct hrtimer waitq_timer;
 534
535 /* timestamp at the latest activation */
536 u64 activated_at;
537
538 /* statistics */
539 struct iocg_pcpu_stat __percpu *pcpu_stat;
 540 struct iocg_stat stat;
541 struct iocg_stat last_stat;
542 u64 last_stat_abs_vusage;
 543 u64 usage_delta_us;
544 u64 wait_since;
545 u64 indebt_since;
546 u64 indelay_since;
547
548 /* this iocg's depth in the hierarchy and ancestors including self */
549 int level;
550 struct ioc_gq *ancestors[];
551};
552
553/* per cgroup */
554struct ioc_cgrp {
555 struct blkcg_policy_data cpd;
556 unsigned int dfl_weight;
557};
558
559struct ioc_now {
560 u64 now_ns;
 561 u64 now;
 562 u64 vnow;
563};
564
565struct iocg_wait {
566 struct wait_queue_entry wait;
567 struct bio *bio;
568 u64 abs_cost;
569 bool committed;
570};
571
572struct iocg_wake_ctx {
573 struct ioc_gq *iocg;
574 u32 hw_inuse;
575 s64 vbudget;
576};
577
578static const struct ioc_params autop[] = {
579 [AUTOP_HDD] = {
580 .qos = {
581 [QOS_RLAT] = 250000, /* 250ms */
582 [QOS_WLAT] = 250000,
583 [QOS_MIN] = VRATE_MIN_PPM,
584 [QOS_MAX] = VRATE_MAX_PPM,
585 },
586 .i_lcoefs = {
587 [I_LCOEF_RBPS] = 174019176,
588 [I_LCOEF_RSEQIOPS] = 41708,
589 [I_LCOEF_RRANDIOPS] = 370,
590 [I_LCOEF_WBPS] = 178075866,
591 [I_LCOEF_WSEQIOPS] = 42705,
592 [I_LCOEF_WRANDIOPS] = 378,
593 },
594 },
595 [AUTOP_SSD_QD1] = {
596 .qos = {
597 [QOS_RLAT] = 25000, /* 25ms */
598 [QOS_WLAT] = 25000,
599 [QOS_MIN] = VRATE_MIN_PPM,
600 [QOS_MAX] = VRATE_MAX_PPM,
601 },
602 .i_lcoefs = {
603 [I_LCOEF_RBPS] = 245855193,
604 [I_LCOEF_RSEQIOPS] = 61575,
605 [I_LCOEF_RRANDIOPS] = 6946,
606 [I_LCOEF_WBPS] = 141365009,
607 [I_LCOEF_WSEQIOPS] = 33716,
608 [I_LCOEF_WRANDIOPS] = 26796,
609 },
610 },
611 [AUTOP_SSD_DFL] = {
612 .qos = {
613 [QOS_RLAT] = 25000, /* 25ms */
614 [QOS_WLAT] = 25000,
615 [QOS_MIN] = VRATE_MIN_PPM,
616 [QOS_MAX] = VRATE_MAX_PPM,
617 },
618 .i_lcoefs = {
619 [I_LCOEF_RBPS] = 488636629,
620 [I_LCOEF_RSEQIOPS] = 8932,
621 [I_LCOEF_RRANDIOPS] = 8518,
622 [I_LCOEF_WBPS] = 427891549,
623 [I_LCOEF_WSEQIOPS] = 28755,
624 [I_LCOEF_WRANDIOPS] = 21940,
625 },
626 .too_fast_vrate_pct = 500,
627 },
628 [AUTOP_SSD_FAST] = {
629 .qos = {
630 [QOS_RLAT] = 5000, /* 5ms */
631 [QOS_WLAT] = 5000,
632 [QOS_MIN] = VRATE_MIN_PPM,
633 [QOS_MAX] = VRATE_MAX_PPM,
634 },
635 .i_lcoefs = {
636 [I_LCOEF_RBPS] = 3102524156LLU,
637 [I_LCOEF_RSEQIOPS] = 724816,
638 [I_LCOEF_RRANDIOPS] = 778122,
639 [I_LCOEF_WBPS] = 1742780862LLU,
640 [I_LCOEF_WSEQIOPS] = 425702,
641 [I_LCOEF_WRANDIOPS] = 443193,
642 },
643 .too_slow_vrate_pct = 10,
644 },
645};
646
647/*
648 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
649 * vtime credit shortage and down on device saturation.
650 */
651static u32 vrate_adj_pct[] =
652 { 0, 0, 0, 0,
653 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
654 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
655 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
656
657static struct blkcg_policy blkcg_policy_iocost;
658
659/* accessors and helpers */
660static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
661{
662 return container_of(rqos, struct ioc, rqos);
663}
664
665static struct ioc *q_to_ioc(struct request_queue *q)
666{
667 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
668}
669
670static const char __maybe_unused *ioc_name(struct ioc *ioc)
671{
 672 struct gendisk *disk = ioc->rqos.disk;
673
674 if (!disk)
675 return "<unknown>";
676 return disk->disk_name;
677}
678
679static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
680{
681 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
682}
683
684static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
685{
686 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
687}
688
689static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
690{
691 return pd_to_blkg(&iocg->pd);
692}
693
694static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
695{
696 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
697 struct ioc_cgrp, cpd);
698}
699
700/*
701 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
 702 * weight, the more expensive each IO. Must round up.
703 */
704static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
705{
 706 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
707}
708
709/*
710 * The inverse of abs_cost_to_cost(). Must round up.
711 */
712static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
713{
 714 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
715}
716
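/*
 * Illustrative sketch (not used by the controller): with hw_inuse at 25%
 * of WEIGHT_ONE, an absolute cost of 1000 scales up 4x on the way in and
 * back down on the way out, modulo the documented round-ups.
 */
static u64 __maybe_unused iocg_cost_scale_example(void)
{
	u32 hw_inuse = WEIGHT_ONE / 4;			/* 25% hierarchical share */
	u64 cost = abs_cost_to_cost(1000, hw_inuse);	/* 1000 * 4 = 4000 */

	return cost_to_abs_cost(cost, hw_inuse);	/* back to 1000 */
}
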
717static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
718 u64 abs_cost, u64 cost)
 719{
720 struct iocg_pcpu_stat *gcs;
721
722 bio->bi_iocost_cost = cost;
723 atomic64_add(cost, &iocg->vtime);
724
725 gcs = get_cpu_ptr(iocg->pcpu_stat);
726 local64_add(abs_cost, &gcs->abs_vusage);
727 put_cpu_ptr(gcs);
728}
729
730static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
731{
732 if (lock_ioc) {
733 spin_lock_irqsave(&iocg->ioc->lock, *flags);
734 spin_lock(&iocg->waitq.lock);
735 } else {
736 spin_lock_irqsave(&iocg->waitq.lock, *flags);
737 }
738}
739
740static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
741{
742 if (unlock_ioc) {
743 spin_unlock(&iocg->waitq.lock);
744 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
745 } else {
746 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
747 }
748}
749
750#define CREATE_TRACE_POINTS
751#include <trace/events/iocost.h>
752
753static void ioc_refresh_margins(struct ioc *ioc)
754{
755 struct ioc_margins *margins = &ioc->margins;
756 u32 period_us = ioc->period_us;
 757 u64 vrate = ioc->vtime_base_rate;
758
759 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
760 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
761 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
762}
763
 764/* latency QoS params changed, update period_us and all the dependent params */
765static void ioc_refresh_period_us(struct ioc *ioc)
766{
767 u32 ppm, lat, multi, period_us;
768
769 lockdep_assert_held(&ioc->lock);
770
771 /* pick the higher latency target */
772 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
773 ppm = ioc->params.qos[QOS_RPPM];
774 lat = ioc->params.qos[QOS_RLAT];
775 } else {
776 ppm = ioc->params.qos[QOS_WPPM];
777 lat = ioc->params.qos[QOS_WLAT];
778 }
779
780 /*
781 * We want the period to be long enough to contain a healthy number
782 * of IOs while short enough for granular control. Define it as a
783 * multiple of the latency target. Ideally, the multiplier should
784 * be scaled according to the percentile so that it would nominally
785 * contain a certain number of requests. Let's be simpler and
786 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
787 */
788 if (ppm)
789 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
790 else
791 multi = 2;
792 period_us = multi * lat;
793 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
794
795 /* calculate dependent params */
796 ioc->period_us = period_us;
797 ioc->timer_slack_ns = div64_u64(
798 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
799 100);
800 ioc_refresh_margins(ioc);
801}
802
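/*
 * Example of the sizing above (illustrative numbers): with QOS_RLAT =
 * 25000us and QOS_RPPM = 950000 (95% of reads must meet the target),
 * multi = max((1000000 - 950000) / 50000, 2) = 2 and the period becomes
 * 50ms; a 50th percentile target (500000 ppm) would give the full 10x,
 * i.e. 250ms, before clamping to [MIN_PERIOD, MAX_PERIOD].
 */
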
803/*
804 * ioc->rqos.disk isn't initialized when this function is called from
805 * the init path.
806 */
807static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk)
808{
809 int idx = ioc->autop_idx;
810 const struct ioc_params *p = &autop[idx];
811 u32 vrate_pct;
812 u64 now_ns;
813
814 /* rotational? */
 815 if (!blk_queue_nonrot(disk->queue))
816 return AUTOP_HDD;
817
818 /* handle SATA SSDs w/ broken NCQ */
 819 if (blk_queue_depth(disk->queue) == 1)
820 return AUTOP_SSD_QD1;
821
822 /* use one of the normal ssd sets */
823 if (idx < AUTOP_SSD_DFL)
824 return AUTOP_SSD_DFL;
825
826 /* if user is overriding anything, maintain what was there */
827 if (ioc->user_qos_params || ioc->user_cost_model)
828 return idx;
829
830 /* step up/down based on the vrate */
 831 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
832 now_ns = ktime_get_ns();
833
834 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
835 if (!ioc->autop_too_fast_at)
836 ioc->autop_too_fast_at = now_ns;
837 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
838 return idx + 1;
839 } else {
840 ioc->autop_too_fast_at = 0;
841 }
842
843 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
844 if (!ioc->autop_too_slow_at)
845 ioc->autop_too_slow_at = now_ns;
846 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
847 return idx - 1;
848 } else {
849 ioc->autop_too_slow_at = 0;
850 }
851
852 return idx;
853}
854
855/*
 856 * Take the following as input
857 *
858 * @bps maximum sequential throughput
859 * @seqiops maximum sequential 4k iops
860 * @randiops maximum random 4k iops
861 *
862 * and calculate the linear model cost coefficients.
863 *
864 * *@page per-page cost 1s / (@bps / 4096)
865 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
 866 * *@randio base cost of a rand IO max((1s / @randiops) - *@page, 0)
867 */
868static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
869 u64 *page, u64 *seqio, u64 *randio)
870{
871 u64 v;
872
873 *page = *seqio = *randio = 0;
874
875 if (bps) {
876 u64 bps_pages = DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE);
877
878 if (bps_pages)
879 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, bps_pages);
880 else
881 *page = 1;
882 }
883
884 if (seqiops) {
885 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
886 if (v > *page)
887 *seqio = v - *page;
888 }
889
890 if (randiops) {
891 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
892 if (v > *page)
893 *randio = v - *page;
894 }
895}
896
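/*
 * Illustrative sketch with hypothetical numbers (not a real device
 * profile): a device doing ~200MB/s sequentially, 50k sequential and 10k
 * random 4k IOPS gets a per-page cost of roughly VTIME_PER_SEC / 51200,
 * with the seq/rand base costs being whatever of 1s/@seqiops and
 * 1s/@randiops remains above that.
 */
static void __maybe_unused ioc_example_lcoefs(u64 *page, u64 *seqio, u64 *randio)
{
	/* bps, seqiops, randiops -> per-page, per-seqio, per-randio costs */
	calc_lcoefs(200ULL << 20, 50000, 10000, page, seqio, randio);
}
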
897static void ioc_refresh_lcoefs(struct ioc *ioc)
898{
899 u64 *u = ioc->params.i_lcoefs;
900 u64 *c = ioc->params.lcoefs;
901
902 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
903 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
904 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
905 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
906}
907
908/*
909 * struct gendisk is required as an argument because ioc->rqos.disk
910 * is not properly initialized when called from the init path.
911 */
912static bool ioc_refresh_params_disk(struct ioc *ioc, bool force,
913 struct gendisk *disk)
914{
915 const struct ioc_params *p;
916 int idx;
917
918 lockdep_assert_held(&ioc->lock);
919
 920 idx = ioc_autop_idx(ioc, disk);
921 p = &autop[idx];
922
923 if (idx == ioc->autop_idx && !force)
924 return false;
925
 926 if (idx != ioc->autop_idx) {
 927 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
928 ioc->vtime_base_rate = VTIME_PER_USEC;
929 }
930
931 ioc->autop_idx = idx;
932 ioc->autop_too_fast_at = 0;
933 ioc->autop_too_slow_at = 0;
934
935 if (!ioc->user_qos_params)
936 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
937 if (!ioc->user_cost_model)
938 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
939
940 ioc_refresh_period_us(ioc);
941 ioc_refresh_lcoefs(ioc);
942
943 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
944 VTIME_PER_USEC, MILLION);
945 ioc->vrate_max = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MAX] *
946 VTIME_PER_USEC, MILLION);
947
948 return true;
949}
950
951static bool ioc_refresh_params(struct ioc *ioc, bool force)
952{
953 return ioc_refresh_params_disk(ioc, force, ioc->rqos.disk);
954}
955
956/*
957 * When an iocg accumulates too much vtime or gets deactivated, we throw away
958 * some vtime, which lowers the overall device utilization. As the exact amount
959 * which is being thrown away is known, we can compensate by accelerating the
960 * vrate accordingly so that the extra vtime generated in the current period
961 * matches what got lost.
962 */
963static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
964{
965 s64 pleft = ioc->period_at + ioc->period_us - now->now;
966 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
967 s64 vcomp, vcomp_min, vcomp_max;
968
969 lockdep_assert_held(&ioc->lock);
970
971 /* we need some time left in this period */
972 if (pleft <= 0)
973 goto done;
974
975 /*
976 * Calculate how much vrate should be adjusted to offset the error.
977 * Limit the amount of adjustment and deduct the adjusted amount from
978 * the error.
979 */
980 vcomp = -div64_s64(ioc->vtime_err, pleft);
981 vcomp_min = -(ioc->vtime_base_rate >> 1);
982 vcomp_max = ioc->vtime_base_rate;
983 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
984
985 ioc->vtime_err += vcomp * pleft;
986
987 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
988done:
989 /* bound how much error can accumulate */
990 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
991}
992
993static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
994 int nr_lagging, int nr_shortages,
995 int prev_busy_level, u32 *missed_ppm)
996{
997 u64 vrate = ioc->vtime_base_rate;
998 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
999
1000 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
1001 if (ioc->busy_level != prev_busy_level || nr_lagging)
 1002 trace_iocost_ioc_vrate_adj(ioc, vrate,
1003 missed_ppm, rq_wait_pct,
1004 nr_lagging, nr_shortages);
1005
1006 return;
1007 }
1008
1009 /*
1010 * If vrate is out of bounds, apply clamp gradually as the
1011 * bounds can change abruptly. Otherwise, apply busy_level
1012 * based adjustment.
1013 */
1014 if (vrate < vrate_min) {
1015 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
1016 vrate = min(vrate, vrate_min);
1017 } else if (vrate > vrate_max) {
1018 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1019 vrate = max(vrate, vrate_max);
1020 } else {
1021 int idx = min_t(int, abs(ioc->busy_level),
1022 ARRAY_SIZE(vrate_adj_pct) - 1);
1023 u32 adj_pct = vrate_adj_pct[idx];
1024
1025 if (ioc->busy_level > 0)
1026 adj_pct = 100 - adj_pct;
1027 else
1028 adj_pct = 100 + adj_pct;
1029
1030 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1031 vrate_min, vrate_max);
1032 }
1033
1034 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1035 nr_lagging, nr_shortages);
1036
1037 ioc->vtime_base_rate = vrate;
1038 ioc_refresh_margins(ioc);
1039}
1040
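/*
 * Example of the busy_level branch above (illustrative): with the default
 * table, busy_level == -5 indexes vrate_adj_pct[5] == 1 and vrate is
 * raised by 1%; a sustained busy_level of +20 picks 2% and shrinks vrate
 * to 98% of its current value, always clamped to [vrate_min, vrate_max].
 */
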
1041/* take a snapshot of the current [v]time and vrate */
1042static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1043{
1044 unsigned seq;
 1045 u64 vrate;
1046
1047 now->now_ns = ktime_get();
1048 now->now = ktime_to_us(now->now_ns);
 1049 vrate = atomic64_read(&ioc->vtime_rate);
1050
1051 /*
1052 * The current vtime is
1053 *
1054 * vtime at period start + (wallclock time since the start) * vrate
1055 *
1056 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1057 * needed, they're seqcount protected.
1058 */
1059 do {
1060 seq = read_seqcount_begin(&ioc->period_seqcount);
1061 now->vnow = ioc->period_at_vtime +
 1062 (now->now - ioc->period_at) * vrate;
1063 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1064}
1065
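/*
 * E.g. for ioc_now() above (illustrative): if the period started 2000us
 * ago at vtime V and vrate is running at 50% (VTIME_PER_USEC / 2), vnow
 * comes out as V + 1000 * VTIME_PER_USEC - the device vtime has advanced
 * only half as fast as the wall clock.
 */
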
1066static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1067{
1068 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1069
1070 write_seqcount_begin(&ioc->period_seqcount);
1071 ioc->period_at = now->now;
1072 ioc->period_at_vtime = now->vnow;
1073 write_seqcount_end(&ioc->period_seqcount);
1074
1075 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1076 add_timer(&ioc->timer);
1077}
1078
1079/*
1080 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1081 * weight sums and propagate upwards accordingly. If @save, the current margin
1082 * is saved to be used as reference for later inuse in-period adjustments.
 1083 */
1084static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1085 bool save, struct ioc_now *now)
1086{
1087 struct ioc *ioc = iocg->ioc;
1088 int lvl;
1089
1090 lockdep_assert_held(&ioc->lock);
1091
1092 /*
1093 * For an active leaf node, its inuse shouldn't be zero or exceed
1094 * @active. An active internal node's inuse is solely determined by the
1095 * inuse to active ratio of its children regardless of @inuse.
1096 */
1097 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1098 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1099 iocg->child_active_sum);
1100 } else {
1101 inuse = clamp_t(u32, inuse, 1, active);
1102 }
 1103
1104 iocg->last_inuse = iocg->inuse;
1105 if (save)
1106 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1107
1108 if (active == iocg->active && inuse == iocg->inuse)
1109 return;
1110
1111 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1112 struct ioc_gq *parent = iocg->ancestors[lvl];
1113 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1114 u32 parent_active = 0, parent_inuse = 0;
1115
1116 /* update the level sums */
1117 parent->child_active_sum += (s32)(active - child->active);
1118 parent->child_inuse_sum += (s32)(inuse - child->inuse);
 1119 /* apply the updates */
1120 child->active = active;
1121 child->inuse = inuse;
1122
1123 /*
1124 * The delta between inuse and active sums indicates that
 1125 * much of weight is being given away. Parent's inuse
1126 * and active should reflect the ratio.
1127 */
1128 if (parent->child_active_sum) {
1129 parent_active = parent->weight;
1130 parent_inuse = DIV64_U64_ROUND_UP(
1131 parent_active * parent->child_inuse_sum,
1132 parent->child_active_sum);
1133 }
1134
1135 /* do we need to keep walking up? */
1136 if (parent_active == parent->active &&
1137 parent_inuse == parent->inuse)
1138 break;
1139
1140 active = parent_active;
1141 inuse = parent_inuse;
1142 }
1143
1144 ioc->weights_updated = true;
1145}
1146
 1147static void commit_weights(struct ioc *ioc)
1148{
1149 lockdep_assert_held(&ioc->lock);
1150
1151 if (ioc->weights_updated) {
1152 /* paired with rmb in current_hweight(), see there */
1153 smp_wmb();
1154 atomic_inc(&ioc->hweight_gen);
1155 ioc->weights_updated = false;
1156 }
1157}
1158
1159static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1160 bool save, struct ioc_now *now)
 1161{
 1162 __propagate_weights(iocg, active, inuse, save, now);
 1163 commit_weights(iocg->ioc);
1164}
1165
1166static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1167{
1168 struct ioc *ioc = iocg->ioc;
1169 int lvl;
1170 u32 hwa, hwi;
1171 int ioc_gen;
1172
1173 /* hot path - if uptodate, use cached */
1174 ioc_gen = atomic_read(&ioc->hweight_gen);
1175 if (ioc_gen == iocg->hweight_gen)
1176 goto out;
1177
1178 /*
1179 * Paired with wmb in commit_weights(). If we saw the updated
1180 * hweight_gen, all the weight updates from __propagate_weights() are
1181 * visible too.
1182 *
1183 * We can race with weight updates during calculation and get it
1184 * wrong. However, hweight_gen would have changed and a future
1185 * reader will recalculate and we're guaranteed to discard the
1186 * wrong result soon.
1187 */
1188 smp_rmb();
1189
 1190 hwa = hwi = WEIGHT_ONE;
1191 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1192 struct ioc_gq *parent = iocg->ancestors[lvl];
1193 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1194 u64 active_sum = READ_ONCE(parent->child_active_sum);
1195 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1196 u32 active = READ_ONCE(child->active);
1197 u32 inuse = READ_ONCE(child->inuse);
1198
1199 /* we can race with deactivations and either may read as zero */
1200 if (!active_sum || !inuse_sum)
1201 continue;
1202
1203 active_sum = max_t(u64, active, active_sum);
1204 hwa = div64_u64((u64)hwa * active, active_sum);
 1205
1206 inuse_sum = max_t(u64, inuse, inuse_sum);
1207 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1208 }
1209
1210 iocg->hweight_active = max_t(u32, hwa, 1);
1211 iocg->hweight_inuse = max_t(u32, hwi, 1);
1212 iocg->hweight_gen = ioc_gen;
1213out:
1214 if (hw_activep)
1215 *hw_activep = iocg->hweight_active;
1216 if (hw_inusep)
1217 *hw_inusep = iocg->hweight_inuse;
1218}
1219
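/*
 * Worked example for the walk above, using the hierarchy from the header
 * comment with everything active: for A0, level 0 gives
 * hwa = WEIGHT_ONE * 100 / (100 + 300) = 25% and level 1 gives
 * 25% * 100 / (100 + 100) = 12.5%, matching the flattened share the
 * header documents for A0 when B is also issuing IOs.
 */
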
1220/*
1221 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1222 * other weights stay unchanged.
1223 */
1224static u32 current_hweight_max(struct ioc_gq *iocg)
1225{
1226 u32 hwm = WEIGHT_ONE;
1227 u32 inuse = iocg->active;
1228 u64 child_inuse_sum;
1229 int lvl;
1230
1231 lockdep_assert_held(&iocg->ioc->lock);
1232
1233 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1234 struct ioc_gq *parent = iocg->ancestors[lvl];
1235 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1236
1237 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1238 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1239 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1240 parent->child_active_sum);
1241 }
1242
1243 return max_t(u32, hwm, 1);
1244}
1245
 1246static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1247{
1248 struct ioc *ioc = iocg->ioc;
1249 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1250 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1251 u32 weight;
1252
1253 lockdep_assert_held(&ioc->lock);
1254
1255 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1256 if (weight != iocg->weight && iocg->active)
 1257 propagate_weights(iocg, weight, iocg->inuse, true, now);
1258 iocg->weight = weight;
1259}
1260
1261static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1262{
1263 struct ioc *ioc = iocg->ioc;
1264 u64 last_period, cur_period;
1265 u64 vtime, vtarget;
1266 int i;
1267
1268 /*
 1269 * If we seem to be already active, just update the stamp to tell the
 1270 * timer that we're still active. We don't mind occasional races.
1271 */
1272 if (!list_empty(&iocg->active_list)) {
1273 ioc_now(ioc, now);
1274 cur_period = atomic64_read(&ioc->cur_period);
1275 if (atomic64_read(&iocg->active_period) != cur_period)
1276 atomic64_set(&iocg->active_period, cur_period);
1277 return true;
1278 }
1279
1280 /* racy check on internal node IOs, treat as root level IOs */
1281 if (iocg->child_active_sum)
1282 return false;
1283
1284 spin_lock_irq(&ioc->lock);
1285
1286 ioc_now(ioc, now);
1287
1288 /* update period */
1289 cur_period = atomic64_read(&ioc->cur_period);
1290 last_period = atomic64_read(&iocg->active_period);
1291 atomic64_set(&iocg->active_period, cur_period);
1292
1293 /* already activated or breaking leaf-only constraint? */
1294 if (!list_empty(&iocg->active_list))
1295 goto succeed_unlock;
1296 for (i = iocg->level - 1; i > 0; i--)
1297 if (!list_empty(&iocg->ancestors[i]->active_list))
 1298 goto fail_unlock;
 1299
1300 if (iocg->child_active_sum)
1301 goto fail_unlock;
1302
1303 /*
1304 * Always start with the target budget. On deactivation, we throw away
1305 * anything above it.
 1306 */
 1307 vtarget = now->vnow - ioc->margins.target;
 1308 vtime = atomic64_read(&iocg->vtime);
 1309
1310 atomic64_add(vtarget - vtime, &iocg->vtime);
1311 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1312 vtime = vtarget;
1313
1314 /*
1315 * Activate, propagate weight and start period timer if not
1316 * running. Reset hweight_gen to avoid accidental match from
1317 * wrapping.
1318 */
1319 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1320 list_add(&iocg->active_list, &ioc->active_iocgs);
 1321
 1322 propagate_weights(iocg, iocg->weight,
 1323 iocg->last_inuse ?: iocg->weight, true, now);
1324
1325 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1326 last_period, cur_period, vtime);
1327
 1328 iocg->activated_at = now->now;
1329
1330 if (ioc->running == IOC_IDLE) {
1331 ioc->running = IOC_RUNNING;
1332 ioc->dfgv_period_at = now->now;
1333 ioc->dfgv_period_rem = 0;
1334 ioc_start_period(ioc, now);
1335 }
1336
 1337succeed_unlock:
1338 spin_unlock_irq(&ioc->lock);
1339 return true;
1340
1341fail_unlock:
1342 spin_unlock_irq(&ioc->lock);
1343 return false;
1344}
1345
1346static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1347{
1348 struct ioc *ioc = iocg->ioc;
1349 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1350 u64 tdelta, delay, new_delay;
1351 s64 vover, vover_pct;
 1352 u32 hwa;
1353
1354 lockdep_assert_held(&iocg->waitq.lock);
1355
1356 /* calculate the current delay in effect - 1/2 every second */
1357 tdelta = now->now - iocg->delay_at;
1358 if (iocg->delay)
1359 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1360 else
1361 delay = 0;
1362
1363 /* calculate the new delay from the debt amount */
 1364 current_hweight(iocg, &hwa, NULL);
1365 vover = atomic64_read(&iocg->vtime) +
1366 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1367 vover_pct = div64_s64(100 * vover,
1368 ioc->period_us * ioc->vtime_base_rate);
1369
1370 if (vover_pct <= MIN_DELAY_THR_PCT)
1371 new_delay = 0;
1372 else if (vover_pct >= MAX_DELAY_THR_PCT)
1373 new_delay = MAX_DELAY;
1374 else
1375 new_delay = MIN_DELAY +
1376 div_u64((MAX_DELAY - MIN_DELAY) *
1377 (vover_pct - MIN_DELAY_THR_PCT),
1378 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1379
1380 /* pick the higher one and apply */
1381 if (new_delay > delay) {
1382 iocg->delay = new_delay;
1383 iocg->delay_at = now->now;
1384 delay = new_delay;
1385 }
 1386
 1387 if (delay >= MIN_DELAY) {
1388 if (!iocg->indelay_since)
1389 iocg->indelay_since = now->now;
1390 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1391 return true;
1392 } else {
 1393 if (iocg->indelay_since) {
 1394 iocg->stat.indelay_us += now->now - iocg->indelay_since;
1395 iocg->indelay_since = 0;
1396 }
 1397 iocg->delay = 0;
1398 blkcg_clear_delay(blkg);
1399 return false;
1400 }
1401}
1402
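/*
 * Example of the delay math in iocg_kick_delay() above (illustrative): an
 * existing delay halves for every full second since delay_at
 * (delay >> (tdelta / 1s)).  For the debt-derived term, a vover_pct
 * halfway between MIN_DELAY_THR_PCT and MAX_DELAY_THR_PCT interpolates to
 * roughly MIN_DELAY + (MAX_DELAY - MIN_DELAY) / 2, i.e. about 125ms, and
 * the larger of the decayed and recomputed delays wins.
 */
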
1403static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1404 struct ioc_now *now)
1405{
1406 struct iocg_pcpu_stat *gcs;
1407
1408 lockdep_assert_held(&iocg->ioc->lock);
1409 lockdep_assert_held(&iocg->waitq.lock);
1410 WARN_ON_ONCE(list_empty(&iocg->active_list));
1411
1412 /*
1413 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
 1414 * inuse donating all of its share to others until its debt is paid off.
1415 */
1416 if (!iocg->abs_vdebt && abs_cost) {
1417 iocg->indebt_since = now->now;
 1418 propagate_weights(iocg, iocg->active, 0, false, now);
 1419 }
1420
1421 iocg->abs_vdebt += abs_cost;
1422
1423 gcs = get_cpu_ptr(iocg->pcpu_stat);
1424 local64_add(abs_cost, &gcs->abs_vusage);
1425 put_cpu_ptr(gcs);
1426}
1427
1428static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1429 struct ioc_now *now)
1430{
1431 lockdep_assert_held(&iocg->ioc->lock);
1432 lockdep_assert_held(&iocg->waitq.lock);
1433
1434 /* make sure that nobody messed with @iocg */
1435 WARN_ON_ONCE(list_empty(&iocg->active_list));
1436 WARN_ON_ONCE(iocg->inuse > 1);
1437
1438 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1439
1440 /* if debt is paid in full, restore inuse */
 1441 if (!iocg->abs_vdebt) {
 1442 iocg->stat.indebt_us += now->now - iocg->indebt_since;
1443 iocg->indebt_since = 0;
1444
1445 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1446 false, now);
 1447 }
1448}
1449
1450static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1451 int flags, void *key)
1452{
1453 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
 1454 struct iocg_wake_ctx *ctx = key;
1455 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1456
1457 ctx->vbudget -= cost;
1458
1459 if (ctx->vbudget < 0)
1460 return -1;
1461
 1462 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
 1463 wait->committed = true;
1464
1465 /*
1466 * autoremove_wake_function() removes the wait entry only when it
1467 * actually changed the task state. We want the wait always removed.
1468 * Remove explicitly and use default_wake_function(). Note that the
1469 * order of operations is important as finish_wait() tests whether
1470 * @wq_entry is removed without grabbing the lock.
 1471 */
 1472 default_wake_function(wq_entry, mode, flags, key);
 1473 list_del_init_careful(&wq_entry->entry);
1474 return 0;
1475}
1476
1477/*
1478 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1479 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1480 * addition to iocg->waitq.lock.
1481 */
1482static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1483 struct ioc_now *now)
1484{
1485 struct ioc *ioc = iocg->ioc;
1486 struct iocg_wake_ctx ctx = { .iocg = iocg };
 1487 u64 vshortage, expires, oexpires;
 1488 s64 vbudget;
 1489 u32 hwa;
1490
1491 lockdep_assert_held(&iocg->waitq.lock);
1492
 1493 current_hweight(iocg, &hwa, NULL);
1494 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1495
1496 /* pay off debt */
 1497 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1498 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1499 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1500 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
 1501
1502 lockdep_assert_held(&ioc->lock);
1503
1504 atomic64_add(vpay, &iocg->vtime);
1505 atomic64_add(vpay, &iocg->done_vtime);
1506 iocg_pay_debt(iocg, abs_vpay, now);
1507 vbudget -= vpay;
 1508 }
 1509
 1510 if (iocg->abs_vdebt || iocg->delay)
 1511 iocg_kick_delay(iocg, now);
 1512
1513 /*
1514 * Debt can still be outstanding if we haven't paid all yet or the
1515 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1516 * under debt. Make sure @vbudget reflects the outstanding amount and is
1517 * not positive.
1518 */
1519 if (iocg->abs_vdebt) {
 1520 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1521 vbudget = min_t(s64, 0, vbudget - vdebt);
1522 }
1523
 1524 /*
1525 * Wake up the ones which are due and see how much vtime we'll need for
1526 * the next one. As paying off debt restores hw_inuse, it must be read
1527 * after the above debt payment.
 1528 */
 1529 ctx.vbudget = vbudget;
1530 current_hweight(iocg, NULL, &ctx.hw_inuse);
1531
 1532 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
 1533
1534 if (!waitqueue_active(&iocg->waitq)) {
1535 if (iocg->wait_since) {
 1536 iocg->stat.wait_us += now->now - iocg->wait_since;
1537 iocg->wait_since = 0;
1538 }
 1539 return;
1540 }
1541
1542 if (!iocg->wait_since)
1543 iocg->wait_since = now->now;
1544
1545 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1546 return;
1547
 1548 /* determine next wakeup, add a timer margin to guarantee chunking */
1549 vshortage = -ctx.vbudget;
1550 expires = now->now_ns +
1551 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1552 NSEC_PER_USEC;
 1553 expires += ioc->timer_slack_ns;
1554
1555 /* if already active and close enough, don't bother */
1556 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1557 if (hrtimer_is_queued(&iocg->waitq_timer) &&
 1558 abs(oexpires - expires) <= ioc->timer_slack_ns)
1559 return;
1560
1561 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
 1562 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1563}
1564
1565static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1566{
1567 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
 1568 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1569 struct ioc_now now;
1570 unsigned long flags;
1571
1572 ioc_now(iocg->ioc, &now);
1573
1574 iocg_lock(iocg, pay_debt, &flags);
1575 iocg_kick_waitq(iocg, pay_debt, &now);
1576 iocg_unlock(iocg, pay_debt, &flags);
1577
1578 return HRTIMER_NORESTART;
1579}
1580
1581static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1582{
1583 u32 nr_met[2] = { };
1584 u32 nr_missed[2] = { };
1585 u64 rq_wait_ns = 0;
1586 int cpu, rw;
1587
1588 for_each_online_cpu(cpu) {
1589 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1590 u64 this_rq_wait_ns;
1591
1592 for (rw = READ; rw <= WRITE; rw++) {
1593 u32 this_met = local_read(&stat->missed[rw].nr_met);
1594 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1595
1596 nr_met[rw] += this_met - stat->missed[rw].last_met;
1597 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1598 stat->missed[rw].last_met = this_met;
1599 stat->missed[rw].last_missed = this_missed;
1600 }
1601
 1602 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1603 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1604 stat->last_rq_wait_ns = this_rq_wait_ns;
1605 }
1606
1607 for (rw = READ; rw <= WRITE; rw++) {
1608 if (nr_met[rw] + nr_missed[rw])
1609 missed_ppm_ar[rw] =
1610 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1611 nr_met[rw] + nr_missed[rw]);
1612 else
1613 missed_ppm_ar[rw] = 0;
1614 }
1615
1616 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1617 ioc->period_us * NSEC_PER_USEC);
1618}
1619
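/*
 * E.g. for ioc_lat_stat() above (illustrative): if 950 reads met their
 * latency target and 50 missed it during the period, missed_ppm_ar[READ]
 * comes out as 50 * MILLION / 1000 = 50000 ppm, i.e. 5% of reads missed
 * the target.
 */
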
1620/* was iocg idle this period? */
1621static bool iocg_is_idle(struct ioc_gq *iocg)
1622{
1623 struct ioc *ioc = iocg->ioc;
1624
1625 /* did something get issued this period? */
1626 if (atomic64_read(&iocg->active_period) ==
1627 atomic64_read(&ioc->cur_period))
1628 return false;
1629
1630 /* is something in flight? */
 1631 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1632 return false;
1633
1634 return true;
1635}
1636
1637/*
1638 * Call this function on the target leaf @iocg's to build pre-order traversal
1639 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1640 * ->walk_list and the caller is responsible for dissolving the list after use.
1641 */
1642static void iocg_build_inner_walk(struct ioc_gq *iocg,
1643 struct list_head *inner_walk)
1644{
1645 int lvl;
1646
1647 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1648
1649 /* find the first ancestor which hasn't been visited yet */
1650 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1651 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1652 break;
1653 }
1654
1655 /* walk down and visit the inner nodes to get pre-order traversal */
1656 while (++lvl <= iocg->level - 1) {
1657 struct ioc_gq *inner = iocg->ancestors[lvl];
1658
1659 /* record traversal order */
1660 list_add_tail(&inner->walk_list, inner_walk);
1661 }
1662}
1663
1664/* propagate the deltas to the parent */
1665static void iocg_flush_stat_upward(struct ioc_gq *iocg)
1666{
1667 if (iocg->level > 0) {
1668 struct iocg_stat *parent_stat =
1669 &iocg->ancestors[iocg->level - 1]->stat;
1670
1671 parent_stat->usage_us +=
1672 iocg->stat.usage_us - iocg->last_stat.usage_us;
1673 parent_stat->wait_us +=
1674 iocg->stat.wait_us - iocg->last_stat.wait_us;
1675 parent_stat->indebt_us +=
1676 iocg->stat.indebt_us - iocg->last_stat.indebt_us;
1677 parent_stat->indelay_us +=
1678 iocg->stat.indelay_us - iocg->last_stat.indelay_us;
1679 }
1680
1681 iocg->last_stat = iocg->stat;
1682}
1683
 1684/* collect per-cpu counters and propagate the deltas to the parent */
 1685static void iocg_flush_stat_leaf(struct ioc_gq *iocg, struct ioc_now *now)
 1686{
 1687 struct ioc *ioc = iocg->ioc;
1688 u64 abs_vusage = 0;
1689 u64 vusage_delta;
1690 int cpu;
1691
1692 lockdep_assert_held(&iocg->ioc->lock);
1693
1694 /* collect per-cpu counters */
1695 for_each_possible_cpu(cpu) {
1696 abs_vusage += local64_read(
1697 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1698 }
1699 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1700 iocg->last_stat_abs_vusage = abs_vusage;
1701
ac33e91e 1702 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
2a371f7d 1703 iocg->stat.usage_us += iocg->usage_delta_us;
97eb1975 1704
2a371f7d 1705 iocg_flush_stat_upward(iocg);
97eb1975
TH
1706}
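/*
 * Illustrative reading of the conversion in iocg_flush_stat_leaf() above
 * (symbolic, no real numbers implied): abs_vusage accumulates in vtime
 * units, so a per-period delta of 25000 * vtime_base_rate means the iocg
 * consumed roughly 25ms worth of device time, which is what usage_us and
 * usage_delta_us record.
 */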
1707
1708/* get stat counters ready for reading on all active iocgs */
1709static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1710{
1711 LIST_HEAD(inner_walk);
1712 struct ioc_gq *iocg, *tiocg;
1713
1714 /* flush leaves and build inner node walk list */
1715 list_for_each_entry(iocg, target_iocgs, active_list) {
2a371f7d 1716 iocg_flush_stat_leaf(iocg, now);
97eb1975
TH
1717 iocg_build_inner_walk(iocg, &inner_walk);
1718 }
1719
1720 /* keep flushing upwards by walking the inner list backwards */
1721 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
2a371f7d 1722 iocg_flush_stat_upward(iocg);
97eb1975
TH
1723 list_del_init(&iocg->walk_list);
1724 }
1725}
1726
93f7d2db
TH
1727/*
1728 * Determine what @iocg's hweight_inuse should be after donating unused
1729 * capacity. @hwm is the upper bound and used to signal no donation. This
1730 * function also throws away @iocg's excess budget.
1731 */
ac33e91e
TH
1732static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1733 u32 usage, struct ioc_now *now)
7caa4715 1734{
93f7d2db
TH
1735 struct ioc *ioc = iocg->ioc;
1736 u64 vtime = atomic64_read(&iocg->vtime);
f1de2439 1737 s64 excess, delta, target, new_hwi;
93f7d2db 1738
c421a3eb
TH
1739 /* debt handling owns inuse for debtors */
1740 if (iocg->abs_vdebt)
1741 return 1;
1742
93f7d2db
TH
1743 /* see whether minimum margin requirement is met */
1744 if (waitqueue_active(&iocg->waitq) ||
1745 time_after64(vtime, now->vnow - ioc->margins.min))
1746 return hwm;
1747
ac33e91e
TH
1748 /* throw away excess above target */
1749 excess = now->vnow - vtime - ioc->margins.target;
93f7d2db
TH
1750 if (excess > 0) {
1751 atomic64_add(excess, &iocg->vtime);
1752 atomic64_add(excess, &iocg->done_vtime);
1753 vtime += excess;
ac33e91e 1754 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
93f7d2db
TH
1755 }
1756
f1de2439
TH
1757 /*
1759 * Let delta be the distance between the iocg's and the device's vtimes
1760 * as a fraction of the period duration. Assuming that the iocg will
1760 * consume the usage determined above, we want to determine new_hwi so
1761 * that delta equals MARGIN_TARGET at the end of the next period.
1762 *
1763 * We need to execute usage worth of IOs while spending the sum of the
1764 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1765 * (delta):
1766 *
1767 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1768 *
1769 * Therefore, the new_hwi is:
1770 *
1771 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
1772 */
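	/*
	 * Hypothetical worked example of the formula above (values are
	 * made up and expressed as fractions of WEIGHT_ONE): with a
	 * target margin of 50% of the period, delta = 0.1 and usage = 0.3
	 * give
	 *
	 *	new_hwi = 0.3 / (1 - 0.5 + 0.1) = 0.5
	 *
	 * i.e. the iocg's hweight_inuse would be set to half of
	 * WEIGHT_ONE, subject to the [1, hwm] clamp applied below.
	 */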
1773 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1774 now->vnow - ioc->period_at_vtime);
1775 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1776 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
7caa4715 1777
f1de2439 1778 return clamp_t(s64, new_hwi, 1, hwm);
7caa4715
TH
1779}
1780
e08d02aa
TH
1781/*
1782 * For work-conservation, an iocg which isn't using all of its share should
1783 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1784 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1785 *
1786 * #1 is mathematically simpler but has the drawback of requiring synchronous
1787 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1788 * change due to donation snapbacks as it has the possibility of grossly
1789 * overshooting what's allowed by the model and vrate.
1790 *
1791 * #2 is inherently safe with local operations. The donating iocg can easily
1792 * snap back to higher weights when needed without worrying about impacts on
1793 * other nodes as the impacts will be inherently correct. This also makes idle
1794 * iocg activations safe. The only effect activations have is decreasing
1795 * hweight_inuse of others, the right solution to which is for those iocgs to
1796 * snap back to higher weights.
1797 *
1798 * So, we go with #2. The challenge is calculating how each donating iocg's
1799 * inuse should be adjusted to achieve the target donation amounts. This is done
1800 * using Andy's method described in the following pdf.
1801 *
1802 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1803 *
1804 * Given the weights and target after-donation hweight_inuse values, Andy's
 1805 * method determines what the proportional distribution should look like at each
1806 * sibling level to maintain the relative relationship between all non-donating
1807 * pairs. To roughly summarize, it divides the tree into donating and
1808 * non-donating parts, calculates global donation rate which is used to
1809 * determine the target hweight_inuse for each node, and then derives per-level
1810 * proportions.
1811 *
1812 * The following pdf shows that global distribution calculated this way can be
1813 * achieved by scaling inuse weights of donating leaves and propagating the
1814 * adjustments upwards proportionally.
1815 *
1816 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1817 *
1818 * Combining the above two, we can determine how each leaf iocg's inuse should
1819 * be adjusted to achieve the target donation.
1820 *
1821 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1822 *
1823 * The inline comments use symbols from the last pdf.
1824 *
1825 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1826 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1827 * t is the sum of the absolute budgets of donating nodes in the subtree.
1828 * w is the weight of the node. w = w_f + w_t
1829 * w_f is the non-donating portion of w. w_f = w * f / b
 1830 * w_t is the donating portion of w. w_t = w * t / b
1831 * s is the sum of all sibling weights. s = Sum(w) for siblings
1832 * s_f and s_t are the non-donating and donating portions of s.
1833 *
1834 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1835 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1836 * after adjustments. Subscript r denotes the root node's values.
1837 */
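/*
 * Hypothetical worked example of the global donation rate used below
 * (values are made up): if the donating subtrees hold t_r = 30% of the
 * root's hweight and will keep t_r' = 10% after donation, then
 *
 *	gamma = (1 - 0.1) / (1 - 0.3) ~= 1.286
 *
 * so every non-donating budget (b_f) is scaled up by roughly 28.6% to
 * absorb the donated capacity.
 */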
93f7d2db
TH
1838static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1839{
e08d02aa
TH
1840 LIST_HEAD(over_hwa);
1841 LIST_HEAD(inner_walk);
1842 struct ioc_gq *iocg, *tiocg, *root_iocg;
1843 u32 after_sum, over_sum, over_target, gamma;
93f7d2db 1844
e08d02aa
TH
1845 /*
1846 * It's pretty unlikely but possible for the total sum of
1847 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
 1848 * confuse the following calculations. If such a condition is detected,
 1849 * scale down every iocg that is over its full share equally to keep the sum below
1850 * WEIGHT_ONE.
1851 */
1852 after_sum = 0;
1853 over_sum = 0;
93f7d2db 1854 list_for_each_entry(iocg, surpluses, surplus_list) {
e08d02aa 1855 u32 hwa;
93f7d2db 1856
e08d02aa
TH
1857 current_hweight(iocg, &hwa, NULL);
1858 after_sum += iocg->hweight_after_donation;
93f7d2db 1859
e08d02aa
TH
1860 if (iocg->hweight_after_donation > hwa) {
1861 over_sum += iocg->hweight_after_donation;
1862 list_add(&iocg->walk_list, &over_hwa);
1863 }
93f7d2db 1864 }
e08d02aa
TH
1865
1866 if (after_sum >= WEIGHT_ONE) {
1867 /*
 1868 * The delta should be deducted from over_sum; calculate the
 1869 * target over_sum value.
1870 */
1871 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1872 WARN_ON_ONCE(over_sum <= over_delta);
1873 over_target = over_sum - over_delta;
1874 } else {
1875 over_target = 0;
1876 }
1877
1878 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1879 if (over_target)
1880 iocg->hweight_after_donation =
1881 div_u64((u64)iocg->hweight_after_donation *
1882 over_target, over_sum);
1883 list_del_init(&iocg->walk_list);
1884 }
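	/*
	 * Hypothetical numbers for the scale-down above (WEIGHT_ONE is
	 * 1 << 16 = 65536): if after_sum came to 70000 with
	 * over_sum = 20000, then over_delta = 70000 - 65535 = 4465 and
	 * over_target = 15535, so each iocg on the over_hwa list has its
	 * hweight_after_donation multiplied by 15535 / 20000 (~0.78),
	 * bringing the total back under WEIGHT_ONE.
	 */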
1885
1886 /*
1887 * Build pre-order inner node walk list and prepare for donation
1888 * adjustment calculations.
1889 */
1890 list_for_each_entry(iocg, surpluses, surplus_list) {
1891 iocg_build_inner_walk(iocg, &inner_walk);
1892 }
1893
1894 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1895 WARN_ON_ONCE(root_iocg->level > 0);
1896
1897 list_for_each_entry(iocg, &inner_walk, walk_list) {
1898 iocg->child_adjusted_sum = 0;
1899 iocg->hweight_donating = 0;
1900 iocg->hweight_after_donation = 0;
1901 }
1902
1903 /*
1904 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1905 * up the hierarchy.
1906 */
1907 list_for_each_entry(iocg, surpluses, surplus_list) {
1908 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1909
1910 parent->hweight_donating += iocg->hweight_donating;
1911 parent->hweight_after_donation += iocg->hweight_after_donation;
1912 }
1913
1914 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1915 if (iocg->level > 0) {
1916 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1917
1918 parent->hweight_donating += iocg->hweight_donating;
1919 parent->hweight_after_donation += iocg->hweight_after_donation;
1920 }
1921 }
1922
1923 /*
1924 * Calculate inner hwa's (b) and make sure the donation values are
1925 * within the accepted ranges as we're doing low res calculations with
1926 * roundups.
1927 */
1928 list_for_each_entry(iocg, &inner_walk, walk_list) {
1929 if (iocg->level) {
1930 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1931
1932 iocg->hweight_active = DIV64_U64_ROUND_UP(
1933 (u64)parent->hweight_active * iocg->active,
1934 parent->child_active_sum);
1935
1936 }
1937
1938 iocg->hweight_donating = min(iocg->hweight_donating,
1939 iocg->hweight_active);
1940 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1941 iocg->hweight_donating - 1);
1942 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1943 iocg->hweight_donating <= 1 ||
1944 iocg->hweight_after_donation == 0)) {
1945 pr_warn("iocg: invalid donation weights in ");
1946 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1947 pr_cont(": active=%u donating=%u after=%u\n",
1948 iocg->hweight_active, iocg->hweight_donating,
1949 iocg->hweight_after_donation);
1950 }
1951 }
1952
1953 /*
1954 * Calculate the global donation rate (gamma) - the rate to adjust
769b628d
TH
1955 * non-donating budgets by.
1956 *
1957 * No need to use 64bit multiplication here as the first operand is
1958 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1959 *
1960 * We know that there are beneficiary nodes and the sum of the donating
1961 * hweights can't be whole; however, due to the round-ups during hweight
1962 * calculations, root_iocg->hweight_donating might still end up equal to
1963 * or greater than whole. Limit the range when calculating the divider.
e08d02aa
TH
1964 *
1965 * gamma = (1 - t_r') / (1 - t_r)
1966 */
1967 gamma = DIV_ROUND_UP(
1968 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
769b628d 1969 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
e08d02aa
TH
1970
1971 /*
1972 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1973 * nodes.
1974 */
1975 list_for_each_entry(iocg, &inner_walk, walk_list) {
1976 struct ioc_gq *parent;
1977 u32 inuse, wpt, wptp;
1978 u64 st, sf;
1979
1980 if (iocg->level == 0) {
1981 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1982 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1983 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1984 WEIGHT_ONE - iocg->hweight_after_donation);
1985 continue;
1986 }
1987
1988 parent = iocg->ancestors[iocg->level - 1];
1989
1990 /* b' = gamma * b_f + b_t' */
1991 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1992 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1993 WEIGHT_ONE) + iocg->hweight_after_donation;
1994
1995 /* w' = s' * b' / b'_p */
1996 inuse = DIV64_U64_ROUND_UP(
1997 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1998 parent->hweight_inuse);
1999
2000 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
2001 st = DIV64_U64_ROUND_UP(
2002 iocg->child_active_sum * iocg->hweight_donating,
2003 iocg->hweight_active);
2004 sf = iocg->child_active_sum - st;
2005 wpt = DIV64_U64_ROUND_UP(
2006 (u64)iocg->active * iocg->hweight_donating,
2007 iocg->hweight_active);
2008 wptp = DIV64_U64_ROUND_UP(
2009 (u64)inuse * iocg->hweight_after_donation,
2010 iocg->hweight_inuse);
2011
2012 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
2013 }
2014
2015 /*
2016 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
2017 * we can finally determine leaf adjustments.
2018 */
2019 list_for_each_entry(iocg, surpluses, surplus_list) {
2020 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2021 u32 inuse;
2022
c421a3eb
TH
2023 /*
2024 * In-debt iocgs participated in the donation calculation with
2025 * the minimum target hweight_inuse. Configuring inuse
2026 * accordingly would work fine but debt handling expects
2027 * @iocg->inuse stay at the minimum and we don't wanna
2028 * interfere.
2029 */
2030 if (iocg->abs_vdebt) {
2031 WARN_ON_ONCE(iocg->inuse > 1);
2032 continue;
2033 }
2034
e08d02aa
TH
2035 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2036 inuse = DIV64_U64_ROUND_UP(
2037 parent->child_adjusted_sum * iocg->hweight_after_donation,
2038 parent->hweight_inuse);
04603755
TH
2039
2040 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2041 iocg->inuse, inuse,
2042 iocg->hweight_inuse,
2043 iocg->hweight_after_donation);
2044
b0853ab4 2045 __propagate_weights(iocg, iocg->active, inuse, true, now);
e08d02aa
TH
2046 }
2047
2048 /* walk list should be dissolved after use */
2049 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2050 list_del_init(&iocg->walk_list);
93f7d2db
TH
2051}
2052
ab8df828
TH
2053/*
2054 * A low weight iocg can amass a large amount of debt, for example, when
2055 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2056 * memory paired with a slow IO device, the debt can span multiple seconds or
2057 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2058 * up blocked paying its debt while the IO device is idle.
2059 *
2060 * The following protects against such cases. If the device has been
d9517841
TH
2061 * sufficiently idle for a while, the debts are halved and delays are
2062 * recalculated.
ab8df828
TH
2063 */
2064static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
33a1fe6d 2065 struct ioc_now *now)
ab8df828 2066{
c7af2a00
TH
2067 struct ioc_gq *iocg;
2068 u64 dur, usage_pct, nr_cycles;
2069
2070 /* if no debtor, reset the cycle */
2071 if (!nr_debtors) {
2072 ioc->dfgv_period_at = now->now;
2073 ioc->dfgv_period_rem = 0;
2074 ioc->dfgv_usage_us_sum = 0;
2075 return;
2076 }
2077
2078 /*
2079 * Debtors can pass through a lot of writes choking the device and we
2080 * don't want to be forgiving debts while the device is struggling from
2081 * write bursts. If we're missing latency targets, consider the device
2082 * fully utilized.
2083 */
2084 if (ioc->busy_level > 0)
2085 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2086
2087 ioc->dfgv_usage_us_sum += usage_us_sum;
2088 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2089 return;
2090
2091 /*
2092 * At least DFGV_PERIOD has passed since the last period. Calculate the
2093 * average usage and reset the period counters.
2094 */
2095 dur = now->now - ioc->dfgv_period_at;
2096 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2097
2098 ioc->dfgv_period_at = now->now;
2099 ioc->dfgv_usage_us_sum = 0;
2100
2101 /* if was too busy, reset everything */
2102 if (usage_pct > DFGV_USAGE_PCT) {
2103 ioc->dfgv_period_rem = 0;
2104 return;
2105 }
2106
2107 /*
2108 * Usage is lower than threshold. Let's forgive some debts. Debt
2109 * forgiveness runs off of the usual ioc timer but its period usually
2110 * doesn't match ioc's. Compensate the difference by performing the
2111 * reduction as many times as would fit in the duration since the last
2112 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2113 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2114 * reductions is doubled.
2115 */
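	/*
	 * Hypothetical illustration of the cycle accounting below
	 * (durations are made up): if dur + dfgv_period_rem amounts to
	 * 2.5 DFGV_PERIODs, do_div() yields nr_cycles = 2 with
	 * 0.5 DFGV_PERIOD carried over in dfgv_period_rem, so abs_vdebt
	 * and delay are shifted right by two (quartered), floored at 1
	 * by the "?: 1" below.
	 */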
2116 nr_cycles = dur + ioc->dfgv_period_rem;
2117 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2118
2119 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
c5a6561b
TH
2120 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2121
bec02dbb 2122 if (!iocg->abs_vdebt && !iocg->delay)
c7af2a00 2123 continue;
c5a6561b 2124
c7af2a00 2125 spin_lock(&iocg->waitq.lock);
c5a6561b
TH
2126
2127 old_debt = iocg->abs_vdebt;
2128 old_delay = iocg->delay;
2129
bec02dbb
TH
2130 if (iocg->abs_vdebt)
2131 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2132 if (iocg->delay)
2133 iocg->delay = iocg->delay >> nr_cycles ?: 1;
2134
c7af2a00 2135 iocg_kick_waitq(iocg, true, now);
c5a6561b
TH
2136
2137 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2138 old_debt, iocg->abs_vdebt,
2139 old_delay, iocg->delay);
2140
c7af2a00 2141 spin_unlock(&iocg->waitq.lock);
ab8df828
TH
2142 }
2143}
2144
2474787a
BW
2145/*
 2146 * Check the active iocgs' state to avoid oversleeping and deactivate
2147 * idle iocgs.
2148 *
2149 * Since waiters determine the sleep durations based on the vrate
2150 * they saw at the time of sleep, if vrate has increased, some
2151 * waiters could be sleeping for too long. Wake up tardy waiters
2152 * which should have woken up in the last period and expire idle
2153 * iocgs.
2154 */
2155static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
7caa4715 2156{
2474787a 2157 int nr_debtors = 0;
7caa4715 2158 struct ioc_gq *iocg, *tiocg;
7caa4715 2159
7caa4715 2160 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
d9012a59 2161 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
5160a5a5 2162 !iocg->delay && !iocg_is_idle(iocg))
7caa4715
TH
2163 continue;
2164
2165 spin_lock(&iocg->waitq.lock);
2166
f0bf84a5
TH
2167 /* flush wait and indebt stat deltas */
2168 if (iocg->wait_since) {
2a371f7d 2169 iocg->stat.wait_us += now->now - iocg->wait_since;
2474787a 2170 iocg->wait_since = now->now;
f0bf84a5
TH
2171 }
2172 if (iocg->indebt_since) {
2a371f7d 2173 iocg->stat.indebt_us +=
2474787a
BW
2174 now->now - iocg->indebt_since;
2175 iocg->indebt_since = now->now;
f0bf84a5
TH
2176 }
2177 if (iocg->indelay_since) {
2a371f7d 2178 iocg->stat.indelay_us +=
2474787a
BW
2179 now->now - iocg->indelay_since;
2180 iocg->indelay_since = now->now;
f0bf84a5
TH
2181 }
2182
5160a5a5
TH
2183 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2184 iocg->delay) {
7caa4715 2185 /* might be oversleeping vtime / hweight changes, kick */
2474787a 2186 iocg_kick_waitq(iocg, true, now);
bec02dbb 2187 if (iocg->abs_vdebt || iocg->delay)
dda1315f 2188 nr_debtors++;
7caa4715
TH
2189 } else if (iocg_is_idle(iocg)) {
2190 /* no waiter and idle, deactivate */
ac33e91e
TH
2191 u64 vtime = atomic64_read(&iocg->vtime);
2192 s64 excess;
2193
2194 /*
2195 * @iocg has been inactive for a full duration and will
2196 * have a high budget. Account anything above target as
2197 * error and throw away. On reactivation, it'll start
2198 * with the target budget.
2199 */
2474787a 2200 excess = now->vnow - vtime - ioc->margins.target;
ac33e91e
TH
2201 if (excess > 0) {
2202 u32 old_hwi;
2203
2204 current_hweight(iocg, NULL, &old_hwi);
2205 ioc->vtime_err -= div64_u64(excess * old_hwi,
2206 WEIGHT_ONE);
2207 }
2208
76efc1c7
BW
2209 TRACE_IOCG_PATH(iocg_idle, iocg, now,
2210 atomic64_read(&iocg->active_period),
2211 atomic64_read(&ioc->cur_period), vtime);
2474787a 2212 __propagate_weights(iocg, 0, 0, false, now);
7caa4715
TH
2213 list_del_init(&iocg->active_list);
2214 }
2215
2216 spin_unlock(&iocg->waitq.lock);
2217 }
2474787a 2218
00410f1b 2219 commit_weights(ioc);
2474787a
BW
2220 return nr_debtors;
2221}
2222
2223static void ioc_timer_fn(struct timer_list *timer)
2224{
2225 struct ioc *ioc = container_of(timer, struct ioc, timer);
2226 struct ioc_gq *iocg, *tiocg;
2227 struct ioc_now now;
2228 LIST_HEAD(surpluses);
2229 int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2230 u64 usage_us_sum = 0;
074501bc
YK
2231 u32 ppm_rthr;
2232 u32 ppm_wthr;
2474787a
BW
2233 u32 missed_ppm[2], rq_wait_pct;
2234 u64 period_vtime;
2235 int prev_busy_level;
2236
2237 /* how were the latencies during the period? */
2238 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2239
2240 /* take care of active iocgs */
2241 spin_lock_irq(&ioc->lock);
2242
074501bc
YK
2243 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2244 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2474787a
BW
2245 ioc_now(ioc, &now);
2246
2247 period_vtime = now.vnow - ioc->period_at_vtime;
2248 if (WARN_ON_ONCE(!period_vtime)) {
2249 spin_unlock_irq(&ioc->lock);
2250 return;
2251 }
2252
2253 nr_debtors = ioc_check_iocgs(ioc, &now);
7caa4715 2254
f0bf84a5
TH
2255 /*
2256 * Wait and indebt stat are flushed above and the donation calculation
2257 * below needs updated usage stat. Let's bring stat up-to-date.
2258 */
2259 iocg_flush_stat(&ioc->active_iocgs, &now);
2260
f1de2439 2261 /* calc usage and see whether some weights need to be moved around */
7caa4715 2262 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
c09245f6
BW
2263 u64 vdone, vtime, usage_us;
2264 u32 hw_active, hw_inuse;
7caa4715
TH
2265
2266 /*
2267 * Collect unused and wind vtime closer to vnow to prevent
2268 * iocgs from accumulating a large amount of budget.
2269 */
2270 vdone = atomic64_read(&iocg->done_vtime);
2271 vtime = atomic64_read(&iocg->vtime);
2272 current_hweight(iocg, &hw_active, &hw_inuse);
2273
2274 /*
2275 * Latency QoS detection doesn't account for IOs which are
2276 * in-flight for longer than a period. Detect them by
2277 * comparing vdone against period start. If lagging behind
2278 * IOs from past periods, don't increase vrate.
2279 */
7cd806a9
TH
2280 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2281 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
7caa4715
TH
2282 time_after64(vtime, vdone) &&
2283 time_after64(vtime, now.vnow -
2284 MAX_LAGGING_PERIODS * period_vtime) &&
2285 time_before64(vdone, now.vnow - period_vtime))
2286 nr_lagging++;
2287
7caa4715 2288 /*
f1de2439
TH
2289 * Determine absolute usage factoring in in-flight IOs to avoid
2290 * high-latency completions appearing as idle.
7caa4715 2291 */
1aa50d02 2292 usage_us = iocg->usage_delta_us;
dda1315f 2293 usage_us_sum += usage_us;
f1de2439 2294
7caa4715 2295 /* see whether there's surplus vtime */
8692d2db 2296 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
93f7d2db
TH
2297 if (hw_inuse < hw_active ||
2298 (!waitqueue_active(&iocg->waitq) &&
f1de2439 2299 time_before64(vtime, now.vnow - ioc->margins.low))) {
c09245f6
BW
2300 u32 hwa, old_hwi, hwm, new_hwi, usage;
2301 u64 usage_dur;
2302
2303 if (vdone != vtime) {
2304 u64 inflight_us = DIV64_U64_ROUND_UP(
2305 cost_to_abs_cost(vtime - vdone, hw_inuse),
2306 ioc->vtime_base_rate);
2307
2308 usage_us = max(usage_us, inflight_us);
2309 }
2310
2311 /* convert to hweight based usage ratio */
2312 if (time_after64(iocg->activated_at, ioc->period_at))
2313 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2314 else
2315 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2316
2317 usage = clamp_t(u32,
2318 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2319 usage_dur),
2320 1, WEIGHT_ONE);
93f7d2db
TH
2321
2322 /*
2323 * Already donating or accumulated enough to start.
2324 * Determine the donation amount.
2325 */
ac33e91e 2326 current_hweight(iocg, &hwa, &old_hwi);
93f7d2db 2327 hwm = current_hweight_max(iocg);
ac33e91e
TH
2328 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2329 usage, &now);
edaa2633
TH
2330 /*
2331 * Donation calculation assumes hweight_after_donation
2332 * to be positive, a condition that a donor w/ hwa < 2
2333 * can't meet. Don't bother with donation if hwa is
2334 * below 2. It's not gonna make a meaningful difference
2335 * anyway.
2336 */
2337 if (new_hwi < hwm && hwa >= 2) {
e08d02aa 2338 iocg->hweight_donating = hwa;
93f7d2db 2339 iocg->hweight_after_donation = new_hwi;
8692d2db 2340 list_add(&iocg->surplus_list, &surpluses);
8c936f9e
TH
2341 } else if (!iocg->abs_vdebt) {
2342 /*
2343 * @iocg doesn't have enough to donate. Reset
2344 * its inuse to active.
2345 *
2346 * Don't reset debtors as their inuse's are
2347 * owned by debt handling. This shouldn't affect
 2348 * donation calculation in any meaningful way
2349 * as @iocg doesn't have a meaningful amount of
2350 * share anyway.
2351 */
04603755
TH
2352 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2353 iocg->inuse, iocg->active,
2354 iocg->hweight_inuse, new_hwi);
2355
93f7d2db 2356 __propagate_weights(iocg, iocg->active,
b0853ab4 2357 iocg->active, true, &now);
93f7d2db 2358 nr_shortages++;
7caa4715
TH
2359 }
2360 } else {
93f7d2db 2361 /* genuinely short on vtime */
7caa4715
TH
2362 nr_shortages++;
2363 }
2364 }
2365
93f7d2db
TH
2366 if (!list_empty(&surpluses) && nr_shortages)
2367 transfer_surpluses(&surpluses, &now);
7caa4715 2368
00410f1b 2369 commit_weights(ioc);
7caa4715 2370
8692d2db
TH
2371 /* surplus list should be dissolved after use */
2372 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2373 list_del_init(&iocg->surplus_list);
2374
7caa4715
TH
2375 /*
2376 * If q is getting clogged or we're missing too much, we're issuing
2377 * too much IO and should lower vtime rate. If we're not missing
2378 * and experiencing shortages but not surpluses, we're too stingy
2379 * and should increase vtime rate.
2380 */
25d41e4a 2381 prev_busy_level = ioc->busy_level;
7caa4715
TH
2382 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2383 missed_ppm[READ] > ppm_rthr ||
2384 missed_ppm[WRITE] > ppm_wthr) {
81ca627a 2385 /* clearly missing QoS targets, slow down vrate */
7caa4715
TH
2386 ioc->busy_level = max(ioc->busy_level, 0);
2387 ioc->busy_level++;
7cd806a9 2388 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
7caa4715
TH
2389 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2390 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
81ca627a
TH
2391 /* QoS targets are being met with >25% margin */
2392 if (nr_shortages) {
2393 /*
2394 * We're throttling while the device has spare
2395 * capacity. If vrate was being slowed down, stop.
2396 */
7cd806a9 2397 ioc->busy_level = min(ioc->busy_level, 0);
81ca627a
TH
2398
2399 /*
2400 * If there are IOs spanning multiple periods, wait
065655c8 2401 * them out before pushing the device harder.
81ca627a 2402 */
065655c8 2403 if (!nr_lagging)
7cd806a9 2404 ioc->busy_level--;
81ca627a
TH
2405 } else {
2406 /*
2407 * Nobody is being throttled and the users aren't
2408 * issuing enough IOs to saturate the device. We
2409 * simply don't know how close the device is to
2410 * saturation. Coast.
2411 */
2412 ioc->busy_level = 0;
7cd806a9 2413 }
7caa4715 2414 } else {
81ca627a 2415 /* inside the hysteresis margin, we're good */
7caa4715
TH
2416 ioc->busy_level = 0;
2417 }
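	/*
	 * Hypothetical illustration of the thresholds above (QoS numbers
	 * are made up): with rpct=95.00 configured, ppm_rthr =
	 * MILLION - 950000 = 50000, so more than 5% of reads missing
	 * rlat counts as missing QoS targets and bumps busy_level up,
	 * while keeping missed_ppm below UNBUSY_THR_PCT percent of that
	 * threshold helps busy_level drift down when there are shortages
	 * and no lagging IOs.
	 */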
2418
2419 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2420
926f75f6
BW
2421 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2422 prev_busy_level, missed_ppm);
7caa4715
TH
2423
2424 ioc_refresh_params(ioc, false);
2425
33a1fe6d
TH
2426 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2427
7caa4715
TH
2428 /*
2429 * This period is done. Move onto the next one. If nothing's
2430 * going on with the device, stop the timer.
2431 */
2432 atomic64_inc(&ioc->cur_period);
2433
2434 if (ioc->running != IOC_STOP) {
2435 if (!list_empty(&ioc->active_iocgs)) {
2436 ioc_start_period(ioc, &now);
2437 } else {
2438 ioc->busy_level = 0;
ac33e91e 2439 ioc->vtime_err = 0;
7caa4715
TH
2440 ioc->running = IOC_IDLE;
2441 }
ac33e91e
TH
2442
2443 ioc_refresh_vrate(ioc, &now);
7caa4715
TH
2444 }
2445
2446 spin_unlock_irq(&ioc->lock);
2447}
2448
b0853ab4
TH
2449static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2450 u64 abs_cost, struct ioc_now *now)
2451{
2452 struct ioc *ioc = iocg->ioc;
2453 struct ioc_margins *margins = &ioc->margins;
04603755 2454 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
aa67db24 2455 u32 hwi, adj_step;
b0853ab4
TH
2456 s64 margin;
2457 u64 cost, new_inuse;
8d211554 2458 unsigned long flags;
b0853ab4
TH
2459
2460 current_hweight(iocg, NULL, &hwi);
04603755 2461 old_hwi = hwi;
b0853ab4
TH
2462 cost = abs_cost_to_cost(abs_cost, hwi);
2463 margin = now->vnow - vtime - cost;
2464
c421a3eb
TH
2465 /* debt handling owns inuse for debtors */
2466 if (iocg->abs_vdebt)
2467 return cost;
2468
b0853ab4 2469 /*
5ba1add2 2470 * We only increase inuse during the period and do so if the margin has
b0853ab4
TH
2471 * deteriorated since the previous adjustment.
2472 */
2473 if (margin >= iocg->saved_margin || margin >= margins->low ||
2474 iocg->inuse == iocg->active)
2475 return cost;
2476
8d211554 2477 spin_lock_irqsave(&ioc->lock, flags);
b0853ab4
TH
2478
2479 /* we own inuse only when @iocg is in the normal active state */
c421a3eb 2480 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
8d211554 2481 spin_unlock_irqrestore(&ioc->lock, flags);
b0853ab4
TH
2482 return cost;
2483 }
2484
aa67db24
TH
2485 /*
2486 * Bump up inuse till @abs_cost fits in the existing budget.
2487 * adj_step must be determined after acquiring ioc->lock - we might
2488 * have raced and lost to another thread for activation and could
 2489 * be reading a zero iocg->active before taking ioc->lock, which will
 2490 * lead to an infinite loop.
2491 */
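	/*
	 * Rough illustration of the loop below (the step size depends on
	 * INUSE_ADJ_STEP_PCT): inuse is raised by adj_step at a time,
	 * weights are re-propagated, and the cost is recomputed with the
	 * new hweight_inuse until either vtime + cost fits within vnow
	 * or inuse has reached active and cannot grow further. For
	 * example, if INUSE_ADJ_STEP_PCT were 25 and active = 40000,
	 * adj_step would be 10000.
	 */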
b0853ab4 2492 new_inuse = iocg->inuse;
aa67db24 2493 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
b0853ab4
TH
2494 do {
2495 new_inuse = new_inuse + adj_step;
2496 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2497 current_hweight(iocg, NULL, &hwi);
2498 cost = abs_cost_to_cost(abs_cost, hwi);
2499 } while (time_after64(vtime + cost, now->vnow) &&
2500 iocg->inuse != iocg->active);
2501
8d211554 2502 spin_unlock_irqrestore(&ioc->lock, flags);
04603755
TH
2503
2504 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2505 old_inuse, iocg->inuse, old_hwi, hwi);
2506
b0853ab4
TH
2507 return cost;
2508}
2509
7caa4715
TH
2510static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2511 bool is_merge, u64 *costp)
2512{
2513 struct ioc *ioc = iocg->ioc;
2514 u64 coef_seqio, coef_randio, coef_page;
2515 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2516 u64 seek_pages = 0;
2517 u64 cost = 0;
2518
2519 switch (bio_op(bio)) {
2520 case REQ_OP_READ:
2521 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2522 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2523 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2524 break;
2525 case REQ_OP_WRITE:
2526 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2527 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2528 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2529 break;
2530 default:
2531 goto out;
2532 }
2533
2534 if (iocg->cursor) {
2535 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2536 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2537 }
2538
2539 if (!is_merge) {
2540 if (seek_pages > LCOEF_RANDIO_PAGES) {
2541 cost += coef_randio;
2542 } else {
2543 cost += coef_seqio;
2544 }
2545 }
2546 cost += pages * coef_page;
2547out:
2548 *costp = cost;
2549}
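/*
 * Hypothetical worked example for the linear model above (coefficients
 * are symbolic): a 64KiB read, which the IOC_SECT_TO_PAGE_SHIFT
 * conversion turns into 16 pages assuming a 4KiB accounting page, costs
 * roughly coef_randio + 16 * coef_page when issued far from
 * iocg->cursor, and coef_seqio + 16 * coef_page when continuing at the
 * cursor. Merges skip the base cost and only add the per-page term.
 */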
2550
2551static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2552{
2553 u64 cost;
2554
2555 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2556 return cost;
2557}
2558
cd006509
TH
2559static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2560 u64 *costp)
2561{
2562 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2563
2564 switch (req_op(rq)) {
2565 case REQ_OP_READ:
2566 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2567 break;
2568 case REQ_OP_WRITE:
2569 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2570 break;
2571 default:
2572 *costp = 0;
2573 }
2574}
2575
2576static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2577{
2578 u64 cost;
2579
2580 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2581 return cost;
2582}
2583
7caa4715
TH
2584static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2585{
2586 struct blkcg_gq *blkg = bio->bi_blkg;
2587 struct ioc *ioc = rqos_to_ioc(rqos);
2588 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2589 struct ioc_now now;
2590 struct iocg_wait wait;
7caa4715 2591 u64 abs_cost, cost, vtime;
da437b95
TH
2592 bool use_debt, ioc_locked;
2593 unsigned long flags;
7caa4715 2594
d16baa3f
TH
2595 /* bypass IOs if disabled, still initializing, or for root cgroup */
2596 if (!ioc->enabled || !iocg || !iocg->level)
7caa4715
TH
2597 return;
2598
7caa4715
TH
2599 /* calculate the absolute vtime cost */
2600 abs_cost = calc_vtime_cost(bio, iocg, false);
2601 if (!abs_cost)
2602 return;
2603
f1de2439
TH
2604 if (!iocg_activate(iocg, &now))
2605 return;
2606
7caa4715 2607 iocg->cursor = bio_end_sector(bio);
7caa4715 2608 vtime = atomic64_read(&iocg->vtime);
b0853ab4 2609 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
7caa4715
TH
2610
2611 /*
2612 * If no one's waiting and within budget, issue right away. The
2613 * tests are racy but the races aren't systemic - we only miss once
2614 * in a while which is fine.
2615 */
0b80f986 2616 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
7caa4715 2617 time_before_eq64(vtime + cost, now.vnow)) {
97eb1975 2618 iocg_commit_bio(iocg, bio, abs_cost, cost);
7caa4715
TH
2619 return;
2620 }
2621
36a52481 2622 /*
da437b95
TH
2623 * We're over budget. This can be handled in two ways. IOs which may
2624 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2625 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2626 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2627 * whether debt handling is needed and acquire locks accordingly.
0b80f986 2628 */
da437b95
TH
2629 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2630 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
b0853ab4 2631retry_lock:
da437b95
TH
2632 iocg_lock(iocg, ioc_locked, &flags);
2633
2634 /*
2635 * @iocg must stay activated for debt and waitq handling. Deactivation
2636 * is synchronized against both ioc->lock and waitq.lock and we won't
2637 * get deactivated as long as we're waiting or has debt, so we're good
2638 * if we're activated here. In the unlikely cases that we aren't, just
2639 * issue the IO.
2640 */
0b80f986 2641 if (unlikely(list_empty(&iocg->active_list))) {
da437b95 2642 iocg_unlock(iocg, ioc_locked, &flags);
97eb1975 2643 iocg_commit_bio(iocg, bio, abs_cost, cost);
0b80f986
TH
2644 return;
2645 }
2646
2647 /*
2648 * We're over budget. If @bio has to be issued regardless, remember
2649 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2650 * off the debt before waking more IOs.
2651 *
36a52481 2652 * This way, the debt is continuously paid off each period with the
0b80f986
TH
2653 * actual budget available to the cgroup. If we just wound vtime, we
2654 * would incorrectly use the current hw_inuse for the entire amount
2655 * which, for example, can lead to the cgroup staying blocked for a
2656 * long time even with substantially raised hw_inuse.
2657 *
2658 * An iocg with vdebt should stay online so that the timer can keep
2659 * deducting its vdebt and [de]activate use_delay mechanism
2660 * accordingly. We don't want to race against the timer trying to
2661 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2662 * penalizing the cgroup and its descendants.
36a52481 2663 */
da437b95 2664 if (use_debt) {
c421a3eb 2665 iocg_incur_debt(iocg, abs_cost, &now);
54c52e10 2666 if (iocg_kick_delay(iocg, &now))
ba91c849 2667 blkcg_schedule_throttle(rqos->disk,
d7bd15a1 2668 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
da437b95 2669 iocg_unlock(iocg, ioc_locked, &flags);
7caa4715
TH
2670 return;
2671 }
2672
b0853ab4 2673 /* guarantee that iocgs w/ waiters have maximum inuse */
c421a3eb 2674 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
b0853ab4
TH
2675 if (!ioc_locked) {
2676 iocg_unlock(iocg, false, &flags);
2677 ioc_locked = true;
2678 goto retry_lock;
2679 }
2680 propagate_weights(iocg, iocg->active, iocg->active, true,
2681 &now);
2682 }
2683
7caa4715
TH
2684 /*
2685 * Append self to the waitq and schedule the wakeup timer if we're
2686 * the first waiter. The timer duration is calculated based on the
2687 * current vrate. vtime and hweight changes can make it too short
2688 * or too long. Each wait entry records the absolute cost it's
2689 * waiting for to allow re-evaluation using a custom wait entry.
2690 *
2691 * If too short, the timer simply reschedules itself. If too long,
2692 * the period timer will notice and trigger wakeups.
2693 *
2694 * All waiters are on iocg->waitq and the wait states are
2695 * synchronized using waitq.lock.
2696 */
7caa4715
TH
2697 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2698 wait.wait.private = current;
2699 wait.bio = bio;
2700 wait.abs_cost = abs_cost;
2701 wait.committed = false; /* will be set true by waker */
2702
2703 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
da437b95 2704 iocg_kick_waitq(iocg, ioc_locked, &now);
7caa4715 2705
da437b95 2706 iocg_unlock(iocg, ioc_locked, &flags);
7caa4715
TH
2707
2708 while (true) {
2709 set_current_state(TASK_UNINTERRUPTIBLE);
2710 if (wait.committed)
2711 break;
2712 io_schedule();
2713 }
2714
2715 /* waker already committed us, proceed */
2716 finish_wait(&iocg->waitq, &wait.wait);
2717}
2718
2719static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2720 struct bio *bio)
2721{
2722 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
d16baa3f 2723 struct ioc *ioc = rqos_to_ioc(rqos);
7caa4715 2724 sector_t bio_end = bio_end_sector(bio);
e1518f63 2725 struct ioc_now now;
b0853ab4 2726 u64 vtime, abs_cost, cost;
0b80f986 2727 unsigned long flags;
7caa4715 2728
d16baa3f
TH
2729 /* bypass if disabled, still initializing, or for root cgroup */
2730 if (!ioc->enabled || !iocg || !iocg->level)
7caa4715
TH
2731 return;
2732
2733 abs_cost = calc_vtime_cost(bio, iocg, true);
2734 if (!abs_cost)
2735 return;
2736
e1518f63 2737 ioc_now(ioc, &now);
b0853ab4
TH
2738
2739 vtime = atomic64_read(&iocg->vtime);
2740 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
e1518f63 2741
7caa4715
TH
2742 /* update cursor if backmerging into the request at the cursor */
2743 if (blk_rq_pos(rq) < bio_end &&
2744 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2745 iocg->cursor = bio_end;
2746
e1518f63 2747 /*
0b80f986
TH
2748 * Charge if there's enough vtime budget and the existing request has
2749 * cost assigned.
e1518f63
TH
2750 */
2751 if (rq->bio && rq->bio->bi_iocost_cost &&
0b80f986 2752 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
97eb1975 2753 iocg_commit_bio(iocg, bio, abs_cost, cost);
0b80f986
TH
2754 return;
2755 }
2756
2757 /*
2758 * Otherwise, account it as debt if @iocg is online, which it should
2759 * be for the vast majority of cases. See debt handling in
2760 * ioc_rqos_throttle() for details.
2761 */
c421a3eb
TH
2762 spin_lock_irqsave(&ioc->lock, flags);
2763 spin_lock(&iocg->waitq.lock);
2764
0b80f986 2765 if (likely(!list_empty(&iocg->active_list))) {
c421a3eb
TH
2766 iocg_incur_debt(iocg, abs_cost, &now);
2767 if (iocg_kick_delay(iocg, &now))
ba91c849 2768 blkcg_schedule_throttle(rqos->disk,
c421a3eb 2769 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
0b80f986 2770 } else {
97eb1975 2771 iocg_commit_bio(iocg, bio, abs_cost, cost);
0b80f986 2772 }
c421a3eb
TH
2773
2774 spin_unlock(&iocg->waitq.lock);
2775 spin_unlock_irqrestore(&ioc->lock, flags);
7caa4715
TH
2776}
2777
2778static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2779{
2780 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2781
2782 if (iocg && bio->bi_iocost_cost)
2783 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2784}
2785
2786static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2787{
2788 struct ioc *ioc = rqos_to_ioc(rqos);
5e124f74 2789 struct ioc_pcpu_stat *ccs;
cd006509 2790 u64 on_q_ns, rq_wait_ns, size_nsec;
7caa4715
TH
2791 int pidx, rw;
2792
2793 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2794 return;
2795
62c159a0 2796 switch (req_op(rq)) {
7caa4715
TH
2797 case REQ_OP_READ:
2798 pidx = QOS_RLAT;
2799 rw = READ;
2800 break;
2801 case REQ_OP_WRITE:
2802 pidx = QOS_WLAT;
2803 rw = WRITE;
2804 break;
2805 default:
2806 return;
2807 }
2808
2809 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2810 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
cd006509 2811 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
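	/*
	 * Hypothetical worked example (latencies are made up): if a read
	 * spent on_q_ns = 8ms between allocation and completion, its size
	 * cost translates to size_nsec = 2ms, and qos[QOS_RLAT] is
	 * 5000us, then 8ms - 2ms = 6ms exceeds the 5ms target and the IO
	 * counts towards nr_missed; had it completed 1ms earlier it would
	 * have counted as met.
	 */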
7caa4715 2812
5e124f74
TH
2813 ccs = get_cpu_ptr(ioc->pcpu_stat);
2814
cd006509
TH
2815 if (on_q_ns <= size_nsec ||
2816 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
5e124f74 2817 local_inc(&ccs->missed[rw].nr_met);
7caa4715 2818 else
5e124f74
TH
2819 local_inc(&ccs->missed[rw].nr_missed);
2820
2821 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
7caa4715 2822
5e124f74 2823 put_cpu_ptr(ccs);
7caa4715
TH
2824}
2825
2826static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2827{
2828 struct ioc *ioc = rqos_to_ioc(rqos);
2829
2830 spin_lock_irq(&ioc->lock);
2831 ioc_refresh_params(ioc, false);
2832 spin_unlock_irq(&ioc->lock);
2833}
2834
2835static void ioc_rqos_exit(struct rq_qos *rqos)
2836{
2837 struct ioc *ioc = rqos_to_ioc(rqos);
2838
40e4996e 2839 blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iocost);
7caa4715
TH
2840
2841 spin_lock_irq(&ioc->lock);
2842 ioc->running = IOC_STOP;
2843 spin_unlock_irq(&ioc->lock);
2844
292a089d 2845 timer_shutdown_sync(&ioc->timer);
7caa4715
TH
2846 free_percpu(ioc->pcpu_stat);
2847 kfree(ioc);
2848}
2849
3963d84d 2850static const struct rq_qos_ops ioc_rqos_ops = {
7caa4715
TH
2851 .throttle = ioc_rqos_throttle,
2852 .merge = ioc_rqos_merge,
2853 .done_bio = ioc_rqos_done_bio,
2854 .done = ioc_rqos_done,
2855 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2856 .exit = ioc_rqos_exit,
2857};
2858
57b64554 2859static int blk_iocost_init(struct gendisk *disk)
7caa4715
TH
2860{
2861 struct ioc *ioc;
5e124f74 2862 int i, cpu, ret;
7caa4715
TH
2863
2864 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2865 if (!ioc)
2866 return -ENOMEM;
2867
2868 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2869 if (!ioc->pcpu_stat) {
2870 kfree(ioc);
2871 return -ENOMEM;
2872 }
2873
5e124f74
TH
2874 for_each_possible_cpu(cpu) {
2875 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2876
2877 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2878 local_set(&ccs->missed[i].nr_met, 0);
2879 local_set(&ccs->missed[i].nr_missed, 0);
2880 }
2881 local64_set(&ccs->rq_wait_ns, 0);
2882 }
2883
7caa4715
TH
2884 spin_lock_init(&ioc->lock);
2885 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2886 INIT_LIST_HEAD(&ioc->active_iocgs);
2887
2888 ioc->running = IOC_IDLE;
ac33e91e 2889 ioc->vtime_base_rate = VTIME_PER_USEC;
7caa4715 2890 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
67b7b641 2891 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
7caa4715
TH
2892 ioc->period_at = ktime_to_us(ktime_get());
2893 atomic64_set(&ioc->cur_period, 0);
2894 atomic_set(&ioc->hweight_gen, 0);
2895
2896 spin_lock_irq(&ioc->lock);
2897 ioc->autop_idx = AUTOP_INVALID;
e33b9365 2898 ioc_refresh_params_disk(ioc, true, disk);
7caa4715
TH
2899 spin_unlock_irq(&ioc->lock);
2900
d16baa3f 2901 /*
7a88b1a8 2902 * rqos must be added before activation to allow ioc_pd_init() to
d16baa3f
TH
 2903 * look up the ioc from q. This means that the rqos methods may get
2904 * called before policy activation completion, can't assume that the
2905 * target bio has an iocg associated and need to test for NULL iocg.
2906 */
ce57b558 2907 ret = rq_qos_add(&ioc->rqos, disk, RQ_QOS_COST, &ioc_rqos_ops);
14a6e2eb
JH
2908 if (ret)
2909 goto err_free_ioc;
2910
40e4996e 2911 ret = blkcg_activate_policy(disk, &blkcg_policy_iocost);
14a6e2eb
JH
2912 if (ret)
2913 goto err_del_qos;
7caa4715 2914 return 0;
14a6e2eb
JH
2915
2916err_del_qos:
ce57b558 2917 rq_qos_del(&ioc->rqos);
14a6e2eb
JH
2918err_free_ioc:
2919 free_percpu(ioc->pcpu_stat);
2920 kfree(ioc);
2921 return ret;
7caa4715
TH
2922}
2923
2924static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2925{
2926 struct ioc_cgrp *iocc;
2927
2928 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
e916ad29
TH
2929 if (!iocc)
2930 return NULL;
7caa4715 2931
bd0adb91 2932 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
7caa4715
TH
2933 return &iocc->cpd;
2934}
2935
2936static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2937{
2938 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2939}
2940
0a0b4f79
CH
2941static struct blkg_policy_data *ioc_pd_alloc(struct gendisk *disk,
2942 struct blkcg *blkcg, gfp_t gfp)
7caa4715
TH
2943{
2944 int levels = blkcg->css.cgroup->level + 1;
2945 struct ioc_gq *iocg;
2946
0a0b4f79
CH
2947 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp,
2948 disk->node_id);
7caa4715
TH
2949 if (!iocg)
2950 return NULL;
2951
97eb1975
TH
2952 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2953 if (!iocg->pcpu_stat) {
2954 kfree(iocg);
2955 return NULL;
2956 }
2957
7caa4715
TH
2958 return &iocg->pd;
2959}
2960
2961static void ioc_pd_init(struct blkg_policy_data *pd)
2962{
2963 struct ioc_gq *iocg = pd_to_iocg(pd);
2964 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
a06377c5 2965 struct ioc *ioc = q_to_ioc(blkg->q);
7caa4715
TH
2966 struct ioc_now now;
2967 struct blkcg_gq *tblkg;
2968 unsigned long flags;
2969
2970 ioc_now(ioc, &now);
2971
2972 iocg->ioc = ioc;
2973 atomic64_set(&iocg->vtime, now.vnow);
2974 atomic64_set(&iocg->done_vtime, now.vnow);
2975 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2976 INIT_LIST_HEAD(&iocg->active_list);
97eb1975 2977 INIT_LIST_HEAD(&iocg->walk_list);
8692d2db 2978 INIT_LIST_HEAD(&iocg->surplus_list);
fe20cdb5
TH
2979 iocg->hweight_active = WEIGHT_ONE;
2980 iocg->hweight_inuse = WEIGHT_ONE;
7caa4715
TH
2981
2982 init_waitqueue_head(&iocg->waitq);
2983 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2984 iocg->waitq_timer.function = iocg_waitq_timer_fn;
7caa4715
TH
2985
2986 iocg->level = blkg->blkcg->css.cgroup->level;
2987
2988 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2989 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2990 iocg->ancestors[tiocg->level] = tiocg;
2991 }
2992
2993 spin_lock_irqsave(&ioc->lock, flags);
b0853ab4 2994 weight_updated(iocg, &now);
7caa4715
TH
2995 spin_unlock_irqrestore(&ioc->lock, flags);
2996}
2997
2998static void ioc_pd_free(struct blkg_policy_data *pd)
2999{
3000 struct ioc_gq *iocg = pd_to_iocg(pd);
3001 struct ioc *ioc = iocg->ioc;
5aeac7c4 3002 unsigned long flags;
7caa4715
TH
3003
3004 if (ioc) {
5aeac7c4 3005 spin_lock_irqsave(&ioc->lock, flags);
97eb1975 3006
7caa4715 3007 if (!list_empty(&iocg->active_list)) {
b0853ab4
TH
3008 struct ioc_now now;
3009
3010 ioc_now(ioc, &now);
3011 propagate_weights(iocg, 0, 0, false, &now);
7caa4715
TH
3012 list_del_init(&iocg->active_list);
3013 }
97eb1975
TH
3014
3015 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
8692d2db 3016 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
97eb1975 3017
5aeac7c4 3018 spin_unlock_irqrestore(&ioc->lock, flags);
e036c4ca
TH
3019
3020 hrtimer_cancel(&iocg->waitq_timer);
7caa4715 3021 }
97eb1975 3022 free_percpu(iocg->pcpu_stat);
7caa4715
TH
3023 kfree(iocg);
3024}
3025
3607849d 3026static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
97eb1975
TH
3027{
3028 struct ioc_gq *iocg = pd_to_iocg(pd);
3029 struct ioc *ioc = iocg->ioc;
97eb1975
TH
3030
3031 if (!ioc->enabled)
3607849d 3032 return;
97eb1975
TH
3033
3034 if (iocg->level == 0) {
3035 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
ac33e91e 3036 ioc->vtime_base_rate * 10000,
97eb1975 3037 VTIME_PER_USEC);
252c651a 3038 seq_printf(s, " cost.vrate=%u.%02u", vp10k / 100, vp10k % 100);
97eb1975
TH
3039 }
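	/*
	 * Hypothetical example of the formatting above: a vtime_base_rate
	 * of 1.5 * VTIME_PER_USEC gives vp10k = 15000 and prints
	 * " cost.vrate=150.00", i.e. vtime is advancing at 150% of
	 * wallclock speed.
	 */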
3040
252c651a 3041 seq_printf(s, " cost.usage=%llu", iocg->last_stat.usage_us);
97eb1975 3042
f0bf84a5 3043 if (blkcg_debug_stats)
252c651a
CH
3044 seq_printf(s, " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3045 iocg->last_stat.wait_us,
3046 iocg->last_stat.indebt_us,
3047 iocg->last_stat.indelay_us);
97eb1975
TH
3048}
3049
7caa4715
TH
3050static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3051 int off)
3052{
3053 const char *dname = blkg_dev_name(pd->blkg);
3054 struct ioc_gq *iocg = pd_to_iocg(pd);
3055
3056 if (dname && iocg->cfg_weight)
bd0adb91 3057 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
7caa4715
TH
3058 return 0;
3059}
3060
3061
3062static int ioc_weight_show(struct seq_file *sf, void *v)
3063{
3064 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3065 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3066
bd0adb91 3067 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
7caa4715
TH
3068 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3069 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3070 return 0;
3071}
3072
3073static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3074 size_t nbytes, loff_t off)
3075{
3076 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3077 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3078 struct blkg_conf_ctx ctx;
b0853ab4 3079 struct ioc_now now;
7caa4715
TH
3080 struct ioc_gq *iocg;
3081 u32 v;
3082 int ret;
3083
3084 if (!strchr(buf, ':')) {
3085 struct blkcg_gq *blkg;
3086
3087 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3088 return -EINVAL;
3089
3090 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3091 return -EINVAL;
3092
11431e26 3093 spin_lock_irq(&blkcg->lock);
bd0adb91 3094 iocc->dfl_weight = v * WEIGHT_ONE;
7caa4715
TH
3095 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3096 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3097
3098 if (iocg) {
11431e26 3099 spin_lock(&iocg->ioc->lock);
b0853ab4
TH
3100 ioc_now(iocg->ioc, &now);
3101 weight_updated(iocg, &now);
11431e26 3102 spin_unlock(&iocg->ioc->lock);
7caa4715
TH
3103 }
3104 }
11431e26 3105 spin_unlock_irq(&blkcg->lock);
7caa4715
TH
3106
3107 return nbytes;
3108 }
3109
faffaab2
TH
3110 blkg_conf_init(&ctx, buf);
3111
3112 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, &ctx);
7caa4715 3113 if (ret)
faffaab2 3114 goto err;
7caa4715
TH
3115
3116 iocg = blkg_to_iocg(ctx.blkg);
3117
3118 if (!strncmp(ctx.body, "default", 7)) {
3119 v = 0;
3120 } else {
3121 if (!sscanf(ctx.body, "%u", &v))
3122 goto einval;
3123 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3124 goto einval;
3125 }
3126
41591a51 3127 spin_lock(&iocg->ioc->lock);
bd0adb91 3128 iocg->cfg_weight = v * WEIGHT_ONE;
b0853ab4
TH
3129 ioc_now(iocg->ioc, &now);
3130 weight_updated(iocg, &now);
41591a51 3131 spin_unlock(&iocg->ioc->lock);
7caa4715 3132
faffaab2 3133 blkg_conf_exit(&ctx);
7caa4715
TH
3134 return nbytes;
3135
3136einval:
faffaab2
TH
3137 ret = -EINVAL;
3138err:
3139 blkg_conf_exit(&ctx);
3140 return ret;
7caa4715
TH
3141}
3142
3143static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3144 int off)
3145{
3146 const char *dname = blkg_dev_name(pd->blkg);
3147 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3148
3149 if (!dname)
3150 return 0;
3151
35198e32 3152 spin_lock_irq(&ioc->lock);
7caa4715
TH
3153 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3154 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3155 ioc->params.qos[QOS_RPPM] / 10000,
3156 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3157 ioc->params.qos[QOS_RLAT],
3158 ioc->params.qos[QOS_WPPM] / 10000,
3159 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3160 ioc->params.qos[QOS_WLAT],
3161 ioc->params.qos[QOS_MIN] / 10000,
3162 ioc->params.qos[QOS_MIN] % 10000 / 100,
3163 ioc->params.qos[QOS_MAX] / 10000,
3164 ioc->params.qos[QOS_MAX] % 10000 / 100);
35198e32 3165 spin_unlock_irq(&ioc->lock);
7caa4715
TH
3166 return 0;
3167}
3168
3169static int ioc_qos_show(struct seq_file *sf, void *v)
3170{
3171 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3172
3173 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3174 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3175 return 0;
3176}
3177
3178static const match_table_t qos_ctrl_tokens = {
3179 { QOS_ENABLE, "enable=%u" },
3180 { QOS_CTRL, "ctrl=%s" },
3181 { NR_QOS_CTRL_PARAMS, NULL },
3182};
3183
3184static const match_table_t qos_tokens = {
3185 { QOS_RPPM, "rpct=%s" },
3186 { QOS_RLAT, "rlat=%u" },
3187 { QOS_WPPM, "wpct=%s" },
3188 { QOS_WLAT, "wlat=%u" },
3189 { QOS_MIN, "min=%s" },
3190 { QOS_MAX, "max=%s" },
3191 { NR_QOS_PARAMS, NULL },
3192};
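/*
 * A hypothetical io.cost.qos write that this parser would accept (the
 * device number and values are made up):
 *
 *	8:16 enable=1 ctrl=user rpct=95.00 rlat=5000 wpct=95.00 wlat=10000 min=50.00 max=150.00
 *
 * rpct/wpct and min/max take two decimal places via cgroup_parse_float()
 * while rlat/wlat are plain microsecond values.
 */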
3193
3194static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3195 size_t nbytes, loff_t off)
3196{
faffaab2 3197 struct blkg_conf_ctx ctx;
3657647e 3198 struct gendisk *disk;
7caa4715
TH
3199 struct ioc *ioc;
3200 u32 qos[NR_QOS_PARAMS];
3201 bool enable, user;
faffaab2 3202 char *body, *p;
7caa4715
TH
3203 int ret;
3204
faffaab2 3205 blkg_conf_init(&ctx, input);
7caa4715 3206
faffaab2
TH
3207 ret = blkg_conf_open_bdev(&ctx);
3208 if (ret)
3209 goto err;
3210
3211 body = ctx.body;
3212 disk = ctx.bdev->bd_disk;
235a5a83
YK
3213 if (!queue_is_mq(disk->queue)) {
3214 ret = -EOPNOTSUPP;
3215 goto err;
3216 }
3217
3657647e 3218 ioc = q_to_ioc(disk->queue);
7caa4715 3219 if (!ioc) {
3657647e 3220 ret = blk_iocost_init(disk);
7caa4715
TH
3221 if (ret)
3222 goto err;
3657647e 3223 ioc = q_to_ioc(disk->queue);
7caa4715
TH
3224 }
3225
2b2da2f6
YK
3226 blk_mq_freeze_queue(disk->queue);
3227 blk_mq_quiesce_queue(disk->queue);
3228
7caa4715
TH
3229 spin_lock_irq(&ioc->lock);
3230 memcpy(qos, ioc->params.qos, sizeof(qos));
3231 enable = ioc->enabled;
3232 user = ioc->user_qos_params;
7caa4715 3233
faffaab2 3234 while ((p = strsep(&body, " \t\n"))) {
7caa4715
TH
3235 substring_t args[MAX_OPT_ARGS];
3236 char buf[32];
3237 int tok;
3238 s64 v;
3239
3240 if (!*p)
3241 continue;
3242
3243 switch (match_token(p, qos_ctrl_tokens, args)) {
3244 case QOS_ENABLE:
7b7c5ae4
YK
3245 if (match_u64(&args[0], &v))
3246 goto einval;
7caa4715
TH
3247 enable = v;
3248 continue;
3249 case QOS_CTRL:
3250 match_strlcpy(buf, &args[0], sizeof(buf));
3251 if (!strcmp(buf, "auto"))
3252 user = false;
3253 else if (!strcmp(buf, "user"))
3254 user = true;
3255 else
3256 goto einval;
3257 continue;
3258 }
3259
3260 tok = match_token(p, qos_tokens, args);
3261 switch (tok) {
3262 case QOS_RPPM:
3263 case QOS_WPPM:
3264 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3265 sizeof(buf))
3266 goto einval;
3267 if (cgroup_parse_float(buf, 2, &v))
3268 goto einval;
3269 if (v < 0 || v > 10000)
3270 goto einval;
3271 qos[tok] = v * 100;
3272 break;
3273 case QOS_RLAT:
3274 case QOS_WLAT:
3275 if (match_u64(&args[0], &v))
3276 goto einval;
3277 qos[tok] = v;
3278 break;
3279 case QOS_MIN:
3280 case QOS_MAX:
3281 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3282 sizeof(buf))
3283 goto einval;
3284 if (cgroup_parse_float(buf, 2, &v))
3285 goto einval;
3286 if (v < 0)
3287 goto einval;
3288 qos[tok] = clamp_t(s64, v * 100,
3289 VRATE_MIN_PPM, VRATE_MAX_PPM);
3290 break;
3291 default:
3292 goto einval;
3293 }
3294 user = true;
3295 }
3296
3297 if (qos[QOS_MIN] > qos[QOS_MAX])
3298 goto einval;
3299
7caa4715 3300 if (enable) {
3657647e
CH
3301 blk_stat_enable_accounting(disk->queue);
3302 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
7caa4715 3303 ioc->enabled = true;
04aad37b 3304 wbt_disable_default(disk);
7caa4715 3305 } else {
3657647e 3306 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
7caa4715 3307 ioc->enabled = false;
04aad37b 3308 wbt_enable_default(disk);
7caa4715
TH
3309 }
3310
3311 if (user) {
3312 memcpy(ioc->params.qos, qos, sizeof(qos));
3313 ioc->user_qos_params = true;
3314 } else {
3315 ioc->user_qos_params = false;
3316 }
3317
3318 ioc_refresh_params(ioc, true);
3319 spin_unlock_irq(&ioc->lock);
3320
2b2da2f6
YK
3321 blk_mq_unquiesce_queue(disk->queue);
3322 blk_mq_unfreeze_queue(disk->queue);
3323
faffaab2 3324 blkg_conf_exit(&ctx);
7caa4715
TH
3325 return nbytes;
3326einval:
2c064798 3327 spin_unlock_irq(&ioc->lock);
2b2da2f6
YK
3328
3329 blk_mq_unquiesce_queue(disk->queue);
3330 blk_mq_unfreeze_queue(disk->queue);
3331
7caa4715
TH
3332 ret = -EINVAL;
3333err:
faffaab2 3334 blkg_conf_exit(&ctx);
7caa4715
TH
3335 return ret;
3336}

static u64 ioc_cost_model_prfill(struct seq_file *sf,
				 struct blkg_policy_data *pd, int off)
{
	const char *dname = blkg_dev_name(pd->blkg);
	struct ioc *ioc = pd_to_iocg(pd)->ioc;
	u64 *u = ioc->params.i_lcoefs;

	if (!dname)
		return 0;

	spin_lock_irq(&ioc->lock);
	seq_printf(sf, "%s ctrl=%s model=linear "
		   "rbps=%llu rseqiops=%llu rrandiops=%llu "
		   "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
		   dname, ioc->user_cost_model ? "user" : "auto",
		   u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
		   u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
	spin_unlock_irq(&ioc->lock);
	return 0;
}

static int ioc_cost_model_show(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));

	blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
			  &blkcg_policy_iocost, seq_cft(sf)->private, false);
	return 0;
}
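/*
 * Editor's note (illustrative, not from the original source): given the
 * seq_printf() format above, reading io.cost.model yields one record per
 * active device, e.g. with made-up coefficients:
 *
 *   8:16 ctrl=auto model=linear rbps=174019176 rseqiops=41708
 *        rrandiops=370 wbps=178075866 wseqiops=42705 wrandiops=378
 *
 * The record is emitted as a single line; it is wrapped here only to fit
 * the comment.
 */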

static const match_table_t cost_ctrl_tokens = {
	{ COST_CTRL,		"ctrl=%s" },
	{ COST_MODEL,		"model=%s" },
	{ NR_COST_CTRL_PARAMS,	NULL },
};

static const match_table_t i_lcoef_tokens = {
	{ I_LCOEF_RBPS,		"rbps=%u" },
	{ I_LCOEF_RSEQIOPS,	"rseqiops=%u" },
	{ I_LCOEF_RRANDIOPS,	"rrandiops=%u" },
	{ I_LCOEF_WBPS,		"wbps=%u" },
	{ I_LCOEF_WSEQIOPS,	"wseqiops=%u" },
	{ I_LCOEF_WRANDIOPS,	"wrandiops=%u" },
	{ NR_I_LCOEFS,		NULL },
};

static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
				    size_t nbytes, loff_t off)
{
	struct blkg_conf_ctx ctx;
	struct request_queue *q;
	struct ioc *ioc;
	u64 u[NR_I_LCOEFS];
	bool user;
	char *body, *p;
	int ret;

	blkg_conf_init(&ctx, input);

	ret = blkg_conf_open_bdev(&ctx);
	if (ret)
		goto err;

	body = ctx.body;
	q = bdev_get_queue(ctx.bdev);
	if (!queue_is_mq(q)) {
		ret = -EOPNOTSUPP;
		goto err;
	}

	ioc = q_to_ioc(q);
	if (!ioc) {
		ret = blk_iocost_init(ctx.bdev->bd_disk);
		if (ret)
			goto err;
		ioc = q_to_ioc(q);
	}

	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	spin_lock_irq(&ioc->lock);
	memcpy(u, ioc->params.i_lcoefs, sizeof(u));
	user = ioc->user_cost_model;

	while ((p = strsep(&body, " \t\n"))) {
		substring_t args[MAX_OPT_ARGS];
		char buf[32];
		int tok;
		u64 v;

		if (!*p)
			continue;

		switch (match_token(p, cost_ctrl_tokens, args)) {
		case COST_CTRL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (!strcmp(buf, "auto"))
				user = false;
			else if (!strcmp(buf, "user"))
				user = true;
			else
				goto einval;
			continue;
		case COST_MODEL:
			match_strlcpy(buf, &args[0], sizeof(buf));
			if (strcmp(buf, "linear"))
				goto einval;
			continue;
		}

		tok = match_token(p, i_lcoef_tokens, args);
		if (tok == NR_I_LCOEFS)
			goto einval;
		if (match_u64(&args[0], &v))
			goto einval;
		u[tok] = v;
		user = true;
	}

	if (user) {
		memcpy(ioc->params.i_lcoefs, u, sizeof(u));
		ioc->user_cost_model = true;
	} else {
		ioc->user_cost_model = false;
	}
	ioc_refresh_params(ioc, true);
	spin_unlock_irq(&ioc->lock);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	blkg_conf_exit(&ctx);
	return nbytes;

einval:
	spin_unlock_irq(&ioc->lock);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	ret = -EINVAL;
err:
	blkg_conf_exit(&ctx);
	return ret;
}
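/*
 * Editor's illustration (not part of the original source): a cost-model
 * update written to io.cost.model is a single line keyed by MAJ:MIN and
 * built from cost_ctrl_tokens and i_lcoef_tokens above.  Values are made
 * up for the example:
 *
 *   8:16 ctrl=user model=linear rbps=2706339840 rseqiops=89698
 *        rrandiops=110036 wbps=1063126016 wseqiops=135560 wrandiops=130734
 *
 * Writing any coefficient by hand switches the device to "user" mode;
 * writing ctrl=auto reverts to the built-in parameters on the next
 * ioc_refresh_params().
 */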

static struct cftype ioc_files[] = {
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = ioc_weight_show,
		.write = ioc_weight_write,
	},
	{
		.name = "cost.qos",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_qos_show,
		.write = ioc_qos_write,
	},
	{
		.name = "cost.model",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = ioc_cost_model_show,
		.write = ioc_cost_model_write,
	},
	{}
};
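/*
 * Editor's note (illustrative): with the controller-name prefix added by
 * the cgroup core, these entries surface as io.weight in non-root cgroups
 * and as io.cost.qos / io.cost.model in the root cgroup only.  A weight
 * update (handled by ioc_weight_write(), defined earlier in this file)
 * looks roughly like:
 *
 *   echo "default 100" > /sys/fs/cgroup/<grp>/io.weight
 *   echo "8:16 200" > /sys/fs/cgroup/<grp>/io.weight
 */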

static struct blkcg_policy blkcg_policy_iocost = {
	.dfl_cftypes	= ioc_files,
	.cpd_alloc_fn	= ioc_cpd_alloc,
	.cpd_free_fn	= ioc_cpd_free,
	.pd_alloc_fn	= ioc_pd_alloc,
	.pd_init_fn	= ioc_pd_init,
	.pd_free_fn	= ioc_pd_free,
	.pd_stat_fn	= ioc_pd_stat,
};

static int __init ioc_init(void)
{
	return blkcg_policy_register(&blkcg_policy_iocost);
}

static void __exit ioc_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_iocost);
}

module_init(ioc_init);
module_exit(ioc_exit);