Commit | Line | Data |
---|---|---|
7caa4715 TH |
1 | /* SPDX-License-Identifier: GPL-2.0 |
2 | * | |
3 | * IO cost model based controller. | |
4 | * | |
5 | * Copyright (C) 2019 Tejun Heo <tj@kernel.org> | |
6 | * Copyright (C) 2019 Andy Newell <newella@fb.com> | |
7 | * Copyright (C) 2019 Facebook | |
8 | * | |
9 | * One challenge of controlling IO resources is the lack of trivially | |
10 | * observable cost metric. This is distinguished from CPU and memory where | |
11 | * wallclock time and the number of bytes can serve as accurate enough | |
12 | * approximations. | |
13 | * | |
14 | * Bandwidth and iops are the most commonly used metrics for IO devices but | |
15 | * depending on the type and specifics of the device, different IO patterns | |
16 | * easily lead to multiple orders of magnitude variations rendering them | |
17 | * useless for the purpose of IO capacity distribution. While on-device | |
18 | * time, with a lot of clutches, could serve as a useful approximation for | |
19 | * non-queued rotational devices, this is no longer viable with modern | |
20 | * devices, even the rotational ones. | |
21 | * | |
22 | * While there is no cost metric we can trivially observe, it isn't a | |
23 | * complete mystery. For example, on a rotational device, seek cost | |
24 | * dominates while a contiguous transfer contributes a smaller amount | |
25 | * proportional to the size. If we can characterize at least the relative | |
26 | * costs of these different types of IOs, it should be possible to | |
27 | * implement a reasonable work-conserving proportional IO resource | |
28 | * distribution. | |
29 | * | |
30 | * 1. IO Cost Model | |
31 | * | |
32 | * IO cost model estimates the cost of an IO given its basic parameters and | |
33 | * history (e.g. the end sector of the last IO). The cost is measured in | |
34 | * device time. If a given IO is estimated to cost 10ms, the device should | |
35 | * be able to process ~100 of those IOs in a second. | |
36 | * | |
37 | * Currently, there's only one builtin cost model - linear. Each IO is | |
38 | * classified as sequential or random and given a base cost accordingly. | |
39 | * On top of that, a size cost proportional to the length of the IO is | |
40 | * added. While simple, this model captures the operational | |
41 | * characteristics of a wide varienty of devices well enough. Default | |
42 | * paramters for several different classes of devices are provided and the | |
43 | * parameters can be configured from userspace via | |
44 | * /sys/fs/cgroup/io.cost.model. | |
45 | * | |
46 | * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate | |
47 | * device-specific coefficients. | |
48 | * | |
49 | * 2. Control Strategy | |
50 | * | |
51 | * The device virtual time (vtime) is used as the primary control metric. | |
52 | * The control strategy is composed of the following three parts. | |
53 | * | |
54 | * 2-1. Vtime Distribution | |
55 | * | |
56 | * When a cgroup becomes active in terms of IOs, its hierarchical share is | |
57 | * calculated. Please consider the following hierarchy where the numbers | |
58 | * inside parentheses denote the configured weights. | |
59 | * | |
60 | * root | |
61 | * / \ | |
62 | * A (w:100) B (w:300) | |
63 | * / \ | |
64 | * A0 (w:100) A1 (w:100) | |
65 | * | |
66 | * If B is idle and only A0 and A1 are actively issuing IOs, as the two are | |
67 | * of equal weight, each gets 50% share. If then B starts issuing IOs, B | |
68 | * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest, | |
69 | * 12.5% each. The distribution mechanism only cares about these flattened | |
70 | * shares. They're called hweights (hierarchical weights) and always add | |
fe20cdb5 | 71 | * upto 1 (WEIGHT_ONE). |
7caa4715 TH |
72 | * |
73 | * A given cgroup's vtime runs slower in inverse proportion to its hweight. | |
74 | * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5) | |
75 | * against the device vtime - an IO which takes 10ms on the underlying | |
76 | * device is considered to take 80ms on A0. | |
77 | * | |
78 | * This constitutes the basis of IO capacity distribution. Each cgroup's | |
79 | * vtime is running at a rate determined by its hweight. A cgroup tracks | |
80 | * the vtime consumed by past IOs and can issue a new IO iff doing so | |
81 | * wouldn't outrun the current device vtime. Otherwise, the IO is | |
82 | * suspended until the vtime has progressed enough to cover it. | |
83 | * | |
84 | * 2-2. Vrate Adjustment | |
85 | * | |
86 | * It's unrealistic to expect the cost model to be perfect. There are too | |
87 | * many devices and even on the same device the overall performance | |
88 | * fluctuates depending on numerous factors such as IO mixture and device | |
89 | * internal garbage collection. The controller needs to adapt dynamically. | |
90 | * | |
91 | * This is achieved by adjusting the overall IO rate according to how busy | |
92 | * the device is. If the device becomes overloaded, we're sending down too | |
93 | * many IOs and should generally slow down. If there are waiting issuers | |
94 | * but the device isn't saturated, we're issuing too few and should | |
95 | * generally speed up. | |
96 | * | |
97 | * To slow down, we lower the vrate - the rate at which the device vtime | |
98 | * passes compared to the wall clock. For example, if the vtime is running | |
99 | * at the vrate of 75%, all cgroups added up would only be able to issue | |
100 | * 750ms worth of IOs per second, and vice-versa for speeding up. | |
101 | * | |
102 | * Device business is determined using two criteria - rq wait and | |
103 | * completion latencies. | |
104 | * | |
105 | * When a device gets saturated, the on-device and then the request queues | |
106 | * fill up and a bio which is ready to be issued has to wait for a request | |
107 | * to become available. When this delay becomes noticeable, it's a clear | |
108 | * indication that the device is saturated and we lower the vrate. This | |
109 | * saturation signal is fairly conservative as it only triggers when both | |
110 | * hardware and software queues are filled up, and is used as the default | |
111 | * busy signal. | |
112 | * | |
113 | * As devices can have deep queues and be unfair in how the queued commands | |
114 | * are executed, soley depending on rq wait may not result in satisfactory | |
115 | * control quality. For a better control quality, completion latency QoS | |
116 | * parameters can be configured so that the device is considered saturated | |
117 | * if N'th percentile completion latency rises above the set point. | |
118 | * | |
119 | * The completion latency requirements are a function of both the | |
120 | * underlying device characteristics and the desired IO latency quality of | |
121 | * service. There is an inherent trade-off - the tighter the latency QoS, | |
122 | * the higher the bandwidth lossage. Latency QoS is disabled by default | |
123 | * and can be set through /sys/fs/cgroup/io.cost.qos. | |
124 | * | |
125 | * 2-3. Work Conservation | |
126 | * | |
127 | * Imagine two cgroups A and B with equal weights. A is issuing a small IO | |
128 | * periodically while B is sending out enough parallel IOs to saturate the | |
129 | * device on its own. Let's say A's usage amounts to 100ms worth of IO | |
130 | * cost per second, i.e., 10% of the device capacity. The naive | |
131 | * distribution of half and half would lead to 60% utilization of the | |
132 | * device, a significant reduction in the total amount of work done | |
133 | * compared to free-for-all competition. This is too high a cost to pay | |
134 | * for IO control. | |
135 | * | |
136 | * To conserve the total amount of work done, we keep track of how much | |
137 | * each active cgroup is actually using and yield part of its weight if | |
138 | * there are other cgroups which can make use of it. In the above case, | |
139 | * A's weight will be lowered so that it hovers above the actual usage and | |
140 | * B would be able to use the rest. | |
141 | * | |
142 | * As we don't want to penalize a cgroup for donating its weight, the | |
143 | * surplus weight adjustment factors in a margin and has an immediate | |
144 | * snapback mechanism in case the cgroup needs more IO vtime for itself. | |
145 | * | |
146 | * Note that adjusting down surplus weights has the same effects as | |
147 | * accelerating vtime for other cgroups and work conservation can also be | |
148 | * implemented by adjusting vrate dynamically. However, squaring who can | |
149 | * donate and should take back how much requires hweight propagations | |
150 | * anyway making it easier to implement and understand as a separate | |
151 | * mechanism. | |
6954ff18 TH |
152 | * |
153 | * 3. Monitoring | |
154 | * | |
155 | * Instead of debugfs or other clumsy monitoring mechanisms, this | |
156 | * controller uses a drgn based monitoring script - | |
157 | * tools/cgroup/iocost_monitor.py. For details on drgn, please see | |
158 | * https://github.com/osandov/drgn. The ouput looks like the following. | |
159 | * | |
160 | * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12% | |
7c1ee704 TH |
161 | * active weight hweight% inflt% dbt delay usages% |
162 | * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033 | |
163 | * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077 | |
6954ff18 TH |
164 | * |
165 | * - per : Timer period | |
166 | * - cur_per : Internal wall and device vtime clock | |
167 | * - vrate : Device virtual time rate against wall clock | |
168 | * - weight : Surplus-adjusted and configured weights | |
169 | * - hweight : Surplus-adjusted and configured hierarchical weights | |
170 | * - inflt : The percentage of in-flight IO cost at the end of last period | |
171 | * - del_ms : Deferred issuer delay induction level and duration | |
172 | * - usages : Usage history | |
7caa4715 TH |
173 | */ |
174 | ||
175 | #include <linux/kernel.h> | |
176 | #include <linux/module.h> | |
177 | #include <linux/timer.h> | |
178 | #include <linux/time64.h> | |
179 | #include <linux/parser.h> | |
180 | #include <linux/sched/signal.h> | |
181 | #include <linux/blk-cgroup.h> | |
5e124f74 TH |
182 | #include <asm/local.h> |
183 | #include <asm/local64.h> | |
7caa4715 TH |
184 | #include "blk-rq-qos.h" |
185 | #include "blk-stat.h" | |
186 | #include "blk-wbt.h" | |
187 | ||
188 | #ifdef CONFIG_TRACEPOINTS | |
189 | ||
190 | /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */ | |
191 | #define TRACE_IOCG_PATH_LEN 1024 | |
192 | static DEFINE_SPINLOCK(trace_iocg_path_lock); | |
193 | static char trace_iocg_path[TRACE_IOCG_PATH_LEN]; | |
194 | ||
195 | #define TRACE_IOCG_PATH(type, iocg, ...) \ | |
196 | do { \ | |
197 | unsigned long flags; \ | |
198 | if (trace_iocost_##type##_enabled()) { \ | |
199 | spin_lock_irqsave(&trace_iocg_path_lock, flags); \ | |
200 | cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \ | |
201 | trace_iocg_path, TRACE_IOCG_PATH_LEN); \ | |
202 | trace_iocost_##type(iocg, trace_iocg_path, \ | |
203 | ##__VA_ARGS__); \ | |
204 | spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \ | |
205 | } \ | |
206 | } while (0) | |
207 | ||
208 | #else /* CONFIG_TRACE_POINTS */ | |
209 | #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0) | |
210 | #endif /* CONFIG_TRACE_POINTS */ | |
211 | ||
212 | enum { | |
213 | MILLION = 1000000, | |
214 | ||
215 | /* timer period is calculated from latency requirements, bound it */ | |
216 | MIN_PERIOD = USEC_PER_MSEC, | |
217 | MAX_PERIOD = USEC_PER_SEC, | |
218 | ||
219 | /* | |
f1de2439 | 220 | * iocg->vtime is targeted at 50% behind the device vtime, which |
7caa4715 TH |
221 | * serves as its IO credit buffer. Surplus weight adjustment is |
222 | * immediately canceled if the vtime margin runs below 10%. | |
223 | */ | |
7ca5b2e6 | 224 | MARGIN_MIN_PCT = 10, |
f1de2439 TH |
225 | MARGIN_LOW_PCT = 20, |
226 | MARGIN_TARGET_PCT = 50, | |
7caa4715 | 227 | |
b0853ab4 TH |
228 | INUSE_ADJ_STEP_PCT = 25, |
229 | ||
7ca5b2e6 TH |
230 | /* Have some play in timer operations */ |
231 | TIMER_SLACK_PCT = 1, | |
7caa4715 | 232 | |
7caa4715 | 233 | /* 1/64k is granular enough and can easily be handled w/ u32 */ |
fe20cdb5 | 234 | WEIGHT_ONE = 1 << 16, |
7caa4715 TH |
235 | |
236 | /* | |
237 | * As vtime is used to calculate the cost of each IO, it needs to | |
238 | * be fairly high precision. For example, it should be able to | |
239 | * represent the cost of a single page worth of discard with | |
240 | * suffificient accuracy. At the same time, it should be able to | |
241 | * represent reasonably long enough durations to be useful and | |
242 | * convenient during operation. | |
243 | * | |
244 | * 1s worth of vtime is 2^37. This gives us both sub-nanosecond | |
245 | * granularity and days of wrap-around time even at extreme vrates. | |
246 | */ | |
247 | VTIME_PER_SEC_SHIFT = 37, | |
248 | VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT, | |
249 | VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC, | |
cd006509 | 250 | VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC, |
7caa4715 TH |
251 | |
252 | /* bound vrate adjustments within two orders of magnitude */ | |
253 | VRATE_MIN_PPM = 10000, /* 1% */ | |
254 | VRATE_MAX_PPM = 100000000, /* 10000% */ | |
255 | ||
256 | VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION, | |
257 | VRATE_CLAMP_ADJ_PCT = 4, | |
258 | ||
259 | /* if IOs end up waiting for requests, issue less */ | |
260 | RQ_WAIT_BUSY_PCT = 5, | |
261 | ||
262 | /* unbusy hysterisis */ | |
263 | UNBUSY_THR_PCT = 75, | |
264 | ||
5160a5a5 TH |
265 | /* |
266 | * The effect of delay is indirect and non-linear and a huge amount of | |
267 | * future debt can accumulate abruptly while unthrottled. Linearly scale | |
268 | * up delay as debt is going up and then let it decay exponentially. | |
269 | * This gives us quick ramp ups while delay is accumulating and long | |
270 | * tails which can help reducing the frequency of debt explosions on | |
271 | * unthrottle. The parameters are experimentally determined. | |
272 | * | |
273 | * The delay mechanism provides adequate protection and behavior in many | |
274 | * cases. However, this is far from ideal and falls shorts on both | |
275 | * fronts. The debtors are often throttled too harshly costing a | |
276 | * significant level of fairness and possibly total work while the | |
277 | * protection against their impacts on the system can be choppy and | |
278 | * unreliable. | |
279 | * | |
280 | * The shortcoming primarily stems from the fact that, unlike for page | |
281 | * cache, the kernel doesn't have well-defined back-pressure propagation | |
282 | * mechanism and policies for anonymous memory. Fully addressing this | |
283 | * issue will likely require substantial improvements in the area. | |
284 | */ | |
285 | MIN_DELAY_THR_PCT = 500, | |
286 | MAX_DELAY_THR_PCT = 25000, | |
287 | MIN_DELAY = 250, | |
288 | MAX_DELAY = 250 * USEC_PER_MSEC, | |
289 | ||
dda1315f TH |
290 | /* |
291 | * Halve debts if total usage keeps staying under 25% w/o any shortages | |
292 | * for over 100ms. | |
293 | */ | |
294 | DEBT_BUSY_USAGE_PCT = 25, | |
295 | DEBT_REDUCTION_IDLE_DUR = 100 * USEC_PER_MSEC, | |
296 | ||
7caa4715 TH |
297 | /* don't let cmds which take a very long time pin lagging for too long */ |
298 | MAX_LAGGING_PERIODS = 10, | |
299 | ||
7caa4715 TH |
300 | /* switch iff the conditions are met for longer than this */ |
301 | AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC, | |
302 | ||
303 | /* | |
304 | * Count IO size in 4k pages. The 12bit shift helps keeping | |
305 | * size-proportional components of cost calculation in closer | |
306 | * numbers of digits to per-IO cost components. | |
307 | */ | |
308 | IOC_PAGE_SHIFT = 12, | |
309 | IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT, | |
310 | IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT, | |
311 | ||
312 | /* if apart further than 16M, consider randio for linear model */ | |
313 | LCOEF_RANDIO_PAGES = 4096, | |
314 | }; | |
315 | ||
316 | enum ioc_running { | |
317 | IOC_IDLE, | |
318 | IOC_RUNNING, | |
319 | IOC_STOP, | |
320 | }; | |
321 | ||
322 | /* io.cost.qos controls including per-dev enable of the whole controller */ | |
323 | enum { | |
324 | QOS_ENABLE, | |
325 | QOS_CTRL, | |
326 | NR_QOS_CTRL_PARAMS, | |
327 | }; | |
328 | ||
329 | /* io.cost.qos params */ | |
330 | enum { | |
331 | QOS_RPPM, | |
332 | QOS_RLAT, | |
333 | QOS_WPPM, | |
334 | QOS_WLAT, | |
335 | QOS_MIN, | |
336 | QOS_MAX, | |
337 | NR_QOS_PARAMS, | |
338 | }; | |
339 | ||
340 | /* io.cost.model controls */ | |
341 | enum { | |
342 | COST_CTRL, | |
343 | COST_MODEL, | |
344 | NR_COST_CTRL_PARAMS, | |
345 | }; | |
346 | ||
347 | /* builtin linear cost model coefficients */ | |
348 | enum { | |
349 | I_LCOEF_RBPS, | |
350 | I_LCOEF_RSEQIOPS, | |
351 | I_LCOEF_RRANDIOPS, | |
352 | I_LCOEF_WBPS, | |
353 | I_LCOEF_WSEQIOPS, | |
354 | I_LCOEF_WRANDIOPS, | |
355 | NR_I_LCOEFS, | |
356 | }; | |
357 | ||
358 | enum { | |
359 | LCOEF_RPAGE, | |
360 | LCOEF_RSEQIO, | |
361 | LCOEF_RRANDIO, | |
362 | LCOEF_WPAGE, | |
363 | LCOEF_WSEQIO, | |
364 | LCOEF_WRANDIO, | |
365 | NR_LCOEFS, | |
366 | }; | |
367 | ||
368 | enum { | |
369 | AUTOP_INVALID, | |
370 | AUTOP_HDD, | |
371 | AUTOP_SSD_QD1, | |
372 | AUTOP_SSD_DFL, | |
373 | AUTOP_SSD_FAST, | |
374 | }; | |
375 | ||
376 | struct ioc_gq; | |
377 | ||
378 | struct ioc_params { | |
379 | u32 qos[NR_QOS_PARAMS]; | |
380 | u64 i_lcoefs[NR_I_LCOEFS]; | |
381 | u64 lcoefs[NR_LCOEFS]; | |
382 | u32 too_fast_vrate_pct; | |
383 | u32 too_slow_vrate_pct; | |
384 | }; | |
385 | ||
7ca5b2e6 TH |
386 | struct ioc_margins { |
387 | s64 min; | |
f1de2439 TH |
388 | s64 low; |
389 | s64 target; | |
7ca5b2e6 TH |
390 | }; |
391 | ||
7caa4715 | 392 | struct ioc_missed { |
5e124f74 TH |
393 | local_t nr_met; |
394 | local_t nr_missed; | |
7caa4715 TH |
395 | u32 last_met; |
396 | u32 last_missed; | |
397 | }; | |
398 | ||
399 | struct ioc_pcpu_stat { | |
400 | struct ioc_missed missed[2]; | |
401 | ||
5e124f74 | 402 | local64_t rq_wait_ns; |
7caa4715 TH |
403 | u64 last_rq_wait_ns; |
404 | }; | |
405 | ||
406 | /* per device */ | |
407 | struct ioc { | |
408 | struct rq_qos rqos; | |
409 | ||
410 | bool enabled; | |
411 | ||
412 | struct ioc_params params; | |
7ca5b2e6 | 413 | struct ioc_margins margins; |
7caa4715 | 414 | u32 period_us; |
7ca5b2e6 | 415 | u32 timer_slack_ns; |
7caa4715 TH |
416 | u64 vrate_min; |
417 | u64 vrate_max; | |
418 | ||
419 | spinlock_t lock; | |
420 | struct timer_list timer; | |
421 | struct list_head active_iocgs; /* active cgroups */ | |
422 | struct ioc_pcpu_stat __percpu *pcpu_stat; | |
423 | ||
424 | enum ioc_running running; | |
425 | atomic64_t vtime_rate; | |
ac33e91e TH |
426 | u64 vtime_base_rate; |
427 | s64 vtime_err; | |
7caa4715 | 428 | |
67b7b641 | 429 | seqcount_spinlock_t period_seqcount; |
ce95570a | 430 | u64 period_at; /* wallclock starttime */ |
7caa4715 TH |
431 | u64 period_at_vtime; /* vtime starttime */ |
432 | ||
433 | atomic64_t cur_period; /* inc'd each period */ | |
434 | int busy_level; /* saturation history */ | |
435 | ||
7caa4715 TH |
436 | bool weights_updated; |
437 | atomic_t hweight_gen; /* for lazy hweights */ | |
438 | ||
dda1315f TH |
439 | /* the last time debt cancel condition wasn't met */ |
440 | u64 debt_busy_at; | |
441 | ||
7caa4715 TH |
442 | u64 autop_too_fast_at; |
443 | u64 autop_too_slow_at; | |
444 | int autop_idx; | |
445 | bool user_qos_params:1; | |
446 | bool user_cost_model:1; | |
447 | }; | |
448 | ||
97eb1975 TH |
449 | struct iocg_pcpu_stat { |
450 | local64_t abs_vusage; | |
451 | }; | |
452 | ||
453 | struct iocg_stat { | |
454 | u64 usage_us; | |
f0bf84a5 TH |
455 | u64 wait_us; |
456 | u64 indebt_us; | |
457 | u64 indelay_us; | |
97eb1975 TH |
458 | }; |
459 | ||
7caa4715 TH |
460 | /* per device-cgroup pair */ |
461 | struct ioc_gq { | |
462 | struct blkg_policy_data pd; | |
463 | struct ioc *ioc; | |
464 | ||
465 | /* | |
466 | * A iocg can get its weight from two sources - an explicit | |
467 | * per-device-cgroup configuration or the default weight of the | |
468 | * cgroup. `cfg_weight` is the explicit per-device-cgroup | |
469 | * configuration. `weight` is the effective considering both | |
470 | * sources. | |
471 | * | |
472 | * When an idle cgroup becomes active its `active` goes from 0 to | |
473 | * `weight`. `inuse` is the surplus adjusted active weight. | |
474 | * `active` and `inuse` are used to calculate `hweight_active` and | |
475 | * `hweight_inuse`. | |
476 | * | |
477 | * `last_inuse` remembers `inuse` while an iocg is idle to persist | |
478 | * surplus adjustments. | |
b0853ab4 TH |
479 | * |
480 | * `inuse` may be adjusted dynamically during period. `saved_*` are used | |
481 | * to determine and track adjustments. | |
7caa4715 TH |
482 | */ |
483 | u32 cfg_weight; | |
484 | u32 weight; | |
485 | u32 active; | |
486 | u32 inuse; | |
b0853ab4 | 487 | |
7caa4715 | 488 | u32 last_inuse; |
b0853ab4 | 489 | s64 saved_margin; |
7caa4715 TH |
490 | |
491 | sector_t cursor; /* to detect randio */ | |
492 | ||
493 | /* | |
494 | * `vtime` is this iocg's vtime cursor which progresses as IOs are | |
495 | * issued. If lagging behind device vtime, the delta represents | |
496 | * the currently available IO budget. If runnning ahead, the | |
497 | * overage. | |
498 | * | |
499 | * `vtime_done` is the same but progressed on completion rather | |
500 | * than issue. The delta behind `vtime` represents the cost of | |
501 | * currently in-flight IOs. | |
7caa4715 TH |
502 | */ |
503 | atomic64_t vtime; | |
504 | atomic64_t done_vtime; | |
0b80f986 | 505 | u64 abs_vdebt; |
7caa4715 | 506 | |
5160a5a5 TH |
507 | /* current delay in effect and when it started */ |
508 | u64 delay; | |
509 | u64 delay_at; | |
510 | ||
7caa4715 TH |
511 | /* |
512 | * The period this iocg was last active in. Used for deactivation | |
513 | * and invalidating `vtime`. | |
514 | */ | |
515 | atomic64_t active_period; | |
516 | struct list_head active_list; | |
517 | ||
00410f1b | 518 | /* see __propagate_weights() and current_hweight() for details */ |
7caa4715 TH |
519 | u64 child_active_sum; |
520 | u64 child_inuse_sum; | |
e08d02aa | 521 | u64 child_adjusted_sum; |
7caa4715 TH |
522 | int hweight_gen; |
523 | u32 hweight_active; | |
524 | u32 hweight_inuse; | |
e08d02aa | 525 | u32 hweight_donating; |
93f7d2db | 526 | u32 hweight_after_donation; |
7caa4715 | 527 | |
97eb1975 | 528 | struct list_head walk_list; |
8692d2db | 529 | struct list_head surplus_list; |
97eb1975 | 530 | |
7caa4715 TH |
531 | struct wait_queue_head waitq; |
532 | struct hrtimer waitq_timer; | |
7caa4715 | 533 | |
1aa50d02 TH |
534 | /* timestamp at the latest activation */ |
535 | u64 activated_at; | |
536 | ||
97eb1975 TH |
537 | /* statistics */ |
538 | struct iocg_pcpu_stat __percpu *pcpu_stat; | |
539 | struct iocg_stat local_stat; | |
540 | struct iocg_stat desc_stat; | |
541 | struct iocg_stat last_stat; | |
542 | u64 last_stat_abs_vusage; | |
f1de2439 | 543 | u64 usage_delta_us; |
f0bf84a5 TH |
544 | u64 wait_since; |
545 | u64 indebt_since; | |
546 | u64 indelay_since; | |
7caa4715 TH |
547 | |
548 | /* this iocg's depth in the hierarchy and ancestors including self */ | |
549 | int level; | |
550 | struct ioc_gq *ancestors[]; | |
551 | }; | |
552 | ||
553 | /* per cgroup */ | |
554 | struct ioc_cgrp { | |
555 | struct blkcg_policy_data cpd; | |
556 | unsigned int dfl_weight; | |
557 | }; | |
558 | ||
559 | struct ioc_now { | |
560 | u64 now_ns; | |
ce95570a | 561 | u64 now; |
7caa4715 TH |
562 | u64 vnow; |
563 | u64 vrate; | |
564 | }; | |
565 | ||
566 | struct iocg_wait { | |
567 | struct wait_queue_entry wait; | |
568 | struct bio *bio; | |
569 | u64 abs_cost; | |
570 | bool committed; | |
571 | }; | |
572 | ||
573 | struct iocg_wake_ctx { | |
574 | struct ioc_gq *iocg; | |
575 | u32 hw_inuse; | |
576 | s64 vbudget; | |
577 | }; | |
578 | ||
579 | static const struct ioc_params autop[] = { | |
580 | [AUTOP_HDD] = { | |
581 | .qos = { | |
7afcccaf TH |
582 | [QOS_RLAT] = 250000, /* 250ms */ |
583 | [QOS_WLAT] = 250000, | |
7caa4715 TH |
584 | [QOS_MIN] = VRATE_MIN_PPM, |
585 | [QOS_MAX] = VRATE_MAX_PPM, | |
586 | }, | |
587 | .i_lcoefs = { | |
588 | [I_LCOEF_RBPS] = 174019176, | |
589 | [I_LCOEF_RSEQIOPS] = 41708, | |
590 | [I_LCOEF_RRANDIOPS] = 370, | |
591 | [I_LCOEF_WBPS] = 178075866, | |
592 | [I_LCOEF_WSEQIOPS] = 42705, | |
593 | [I_LCOEF_WRANDIOPS] = 378, | |
594 | }, | |
595 | }, | |
596 | [AUTOP_SSD_QD1] = { | |
597 | .qos = { | |
598 | [QOS_RLAT] = 25000, /* 25ms */ | |
599 | [QOS_WLAT] = 25000, | |
600 | [QOS_MIN] = VRATE_MIN_PPM, | |
601 | [QOS_MAX] = VRATE_MAX_PPM, | |
602 | }, | |
603 | .i_lcoefs = { | |
604 | [I_LCOEF_RBPS] = 245855193, | |
605 | [I_LCOEF_RSEQIOPS] = 61575, | |
606 | [I_LCOEF_RRANDIOPS] = 6946, | |
607 | [I_LCOEF_WBPS] = 141365009, | |
608 | [I_LCOEF_WSEQIOPS] = 33716, | |
609 | [I_LCOEF_WRANDIOPS] = 26796, | |
610 | }, | |
611 | }, | |
612 | [AUTOP_SSD_DFL] = { | |
613 | .qos = { | |
614 | [QOS_RLAT] = 25000, /* 25ms */ | |
615 | [QOS_WLAT] = 25000, | |
616 | [QOS_MIN] = VRATE_MIN_PPM, | |
617 | [QOS_MAX] = VRATE_MAX_PPM, | |
618 | }, | |
619 | .i_lcoefs = { | |
620 | [I_LCOEF_RBPS] = 488636629, | |
621 | [I_LCOEF_RSEQIOPS] = 8932, | |
622 | [I_LCOEF_RRANDIOPS] = 8518, | |
623 | [I_LCOEF_WBPS] = 427891549, | |
624 | [I_LCOEF_WSEQIOPS] = 28755, | |
625 | [I_LCOEF_WRANDIOPS] = 21940, | |
626 | }, | |
627 | .too_fast_vrate_pct = 500, | |
628 | }, | |
629 | [AUTOP_SSD_FAST] = { | |
630 | .qos = { | |
631 | [QOS_RLAT] = 5000, /* 5ms */ | |
632 | [QOS_WLAT] = 5000, | |
633 | [QOS_MIN] = VRATE_MIN_PPM, | |
634 | [QOS_MAX] = VRATE_MAX_PPM, | |
635 | }, | |
636 | .i_lcoefs = { | |
637 | [I_LCOEF_RBPS] = 3102524156LLU, | |
638 | [I_LCOEF_RSEQIOPS] = 724816, | |
639 | [I_LCOEF_RRANDIOPS] = 778122, | |
640 | [I_LCOEF_WBPS] = 1742780862LLU, | |
641 | [I_LCOEF_WSEQIOPS] = 425702, | |
642 | [I_LCOEF_WRANDIOPS] = 443193, | |
643 | }, | |
644 | .too_slow_vrate_pct = 10, | |
645 | }, | |
646 | }; | |
647 | ||
648 | /* | |
649 | * vrate adjust percentages indexed by ioc->busy_level. We adjust up on | |
650 | * vtime credit shortage and down on device saturation. | |
651 | */ | |
652 | static u32 vrate_adj_pct[] = | |
653 | { 0, 0, 0, 0, | |
654 | 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, | |
655 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, | |
656 | 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 }; | |
657 | ||
658 | static struct blkcg_policy blkcg_policy_iocost; | |
659 | ||
660 | /* accessors and helpers */ | |
661 | static struct ioc *rqos_to_ioc(struct rq_qos *rqos) | |
662 | { | |
663 | return container_of(rqos, struct ioc, rqos); | |
664 | } | |
665 | ||
666 | static struct ioc *q_to_ioc(struct request_queue *q) | |
667 | { | |
668 | return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST)); | |
669 | } | |
670 | ||
671 | static const char *q_name(struct request_queue *q) | |
672 | { | |
673 | if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags)) | |
674 | return kobject_name(q->kobj.parent); | |
675 | else | |
676 | return "<unknown>"; | |
677 | } | |
678 | ||
679 | static const char __maybe_unused *ioc_name(struct ioc *ioc) | |
680 | { | |
681 | return q_name(ioc->rqos.q); | |
682 | } | |
683 | ||
684 | static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd) | |
685 | { | |
686 | return pd ? container_of(pd, struct ioc_gq, pd) : NULL; | |
687 | } | |
688 | ||
689 | static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg) | |
690 | { | |
691 | return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost)); | |
692 | } | |
693 | ||
694 | static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg) | |
695 | { | |
696 | return pd_to_blkg(&iocg->pd); | |
697 | } | |
698 | ||
699 | static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg) | |
700 | { | |
701 | return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost), | |
702 | struct ioc_cgrp, cpd); | |
703 | } | |
704 | ||
705 | /* | |
706 | * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical | |
36a52481 | 707 | * weight, the more expensive each IO. Must round up. |
7caa4715 TH |
708 | */ |
709 | static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse) | |
710 | { | |
fe20cdb5 | 711 | return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse); |
7caa4715 TH |
712 | } |
713 | ||
36a52481 TH |
714 | /* |
715 | * The inverse of abs_cost_to_cost(). Must round up. | |
716 | */ | |
717 | static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse) | |
718 | { | |
fe20cdb5 | 719 | return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE); |
36a52481 TH |
720 | } |
721 | ||
97eb1975 TH |
722 | static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, |
723 | u64 abs_cost, u64 cost) | |
7caa4715 | 724 | { |
97eb1975 TH |
725 | struct iocg_pcpu_stat *gcs; |
726 | ||
7caa4715 TH |
727 | bio->bi_iocost_cost = cost; |
728 | atomic64_add(cost, &iocg->vtime); | |
97eb1975 TH |
729 | |
730 | gcs = get_cpu_ptr(iocg->pcpu_stat); | |
731 | local64_add(abs_cost, &gcs->abs_vusage); | |
732 | put_cpu_ptr(gcs); | |
7caa4715 TH |
733 | } |
734 | ||
da437b95 TH |
735 | static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags) |
736 | { | |
737 | if (lock_ioc) { | |
738 | spin_lock_irqsave(&iocg->ioc->lock, *flags); | |
739 | spin_lock(&iocg->waitq.lock); | |
740 | } else { | |
741 | spin_lock_irqsave(&iocg->waitq.lock, *flags); | |
742 | } | |
743 | } | |
744 | ||
745 | static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags) | |
746 | { | |
747 | if (unlock_ioc) { | |
748 | spin_unlock(&iocg->waitq.lock); | |
749 | spin_unlock_irqrestore(&iocg->ioc->lock, *flags); | |
750 | } else { | |
751 | spin_unlock_irqrestore(&iocg->waitq.lock, *flags); | |
752 | } | |
753 | } | |
754 | ||
7caa4715 TH |
755 | #define CREATE_TRACE_POINTS |
756 | #include <trace/events/iocost.h> | |
757 | ||
7ca5b2e6 TH |
758 | static void ioc_refresh_margins(struct ioc *ioc) |
759 | { | |
760 | struct ioc_margins *margins = &ioc->margins; | |
761 | u32 period_us = ioc->period_us; | |
ac33e91e | 762 | u64 vrate = ioc->vtime_base_rate; |
7ca5b2e6 TH |
763 | |
764 | margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate; | |
f1de2439 TH |
765 | margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate; |
766 | margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate; | |
7ca5b2e6 TH |
767 | } |
768 | ||
7caa4715 TH |
769 | /* latency Qos params changed, update period_us and all the dependent params */ |
770 | static void ioc_refresh_period_us(struct ioc *ioc) | |
771 | { | |
772 | u32 ppm, lat, multi, period_us; | |
773 | ||
774 | lockdep_assert_held(&ioc->lock); | |
775 | ||
776 | /* pick the higher latency target */ | |
777 | if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) { | |
778 | ppm = ioc->params.qos[QOS_RPPM]; | |
779 | lat = ioc->params.qos[QOS_RLAT]; | |
780 | } else { | |
781 | ppm = ioc->params.qos[QOS_WPPM]; | |
782 | lat = ioc->params.qos[QOS_WLAT]; | |
783 | } | |
784 | ||
785 | /* | |
786 | * We want the period to be long enough to contain a healthy number | |
787 | * of IOs while short enough for granular control. Define it as a | |
788 | * multiple of the latency target. Ideally, the multiplier should | |
789 | * be scaled according to the percentile so that it would nominally | |
790 | * contain a certain number of requests. Let's be simpler and | |
791 | * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50). | |
792 | */ | |
793 | if (ppm) | |
794 | multi = max_t(u32, (MILLION - ppm) / 50000, 2); | |
795 | else | |
796 | multi = 2; | |
797 | period_us = multi * lat; | |
798 | period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD); | |
799 | ||
800 | /* calculate dependent params */ | |
801 | ioc->period_us = period_us; | |
7ca5b2e6 TH |
802 | ioc->timer_slack_ns = div64_u64( |
803 | (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT, | |
804 | 100); | |
805 | ioc_refresh_margins(ioc); | |
7caa4715 TH |
806 | } |
807 | ||
808 | static int ioc_autop_idx(struct ioc *ioc) | |
809 | { | |
810 | int idx = ioc->autop_idx; | |
811 | const struct ioc_params *p = &autop[idx]; | |
812 | u32 vrate_pct; | |
813 | u64 now_ns; | |
814 | ||
815 | /* rotational? */ | |
816 | if (!blk_queue_nonrot(ioc->rqos.q)) | |
817 | return AUTOP_HDD; | |
818 | ||
819 | /* handle SATA SSDs w/ broken NCQ */ | |
820 | if (blk_queue_depth(ioc->rqos.q) == 1) | |
821 | return AUTOP_SSD_QD1; | |
822 | ||
823 | /* use one of the normal ssd sets */ | |
824 | if (idx < AUTOP_SSD_DFL) | |
825 | return AUTOP_SSD_DFL; | |
826 | ||
827 | /* if user is overriding anything, maintain what was there */ | |
828 | if (ioc->user_qos_params || ioc->user_cost_model) | |
829 | return idx; | |
830 | ||
831 | /* step up/down based on the vrate */ | |
ac33e91e | 832 | vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC); |
7caa4715 TH |
833 | now_ns = ktime_get_ns(); |
834 | ||
835 | if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) { | |
836 | if (!ioc->autop_too_fast_at) | |
837 | ioc->autop_too_fast_at = now_ns; | |
838 | if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC) | |
839 | return idx + 1; | |
840 | } else { | |
841 | ioc->autop_too_fast_at = 0; | |
842 | } | |
843 | ||
844 | if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) { | |
845 | if (!ioc->autop_too_slow_at) | |
846 | ioc->autop_too_slow_at = now_ns; | |
847 | if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC) | |
848 | return idx - 1; | |
849 | } else { | |
850 | ioc->autop_too_slow_at = 0; | |
851 | } | |
852 | ||
853 | return idx; | |
854 | } | |
855 | ||
856 | /* | |
857 | * Take the followings as input | |
858 | * | |
859 | * @bps maximum sequential throughput | |
860 | * @seqiops maximum sequential 4k iops | |
861 | * @randiops maximum random 4k iops | |
862 | * | |
863 | * and calculate the linear model cost coefficients. | |
864 | * | |
865 | * *@page per-page cost 1s / (@bps / 4096) | |
866 | * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0) | |
867 | * @randiops base cost of a rand IO max((1s / @randiops) - *@page, 0) | |
868 | */ | |
869 | static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops, | |
870 | u64 *page, u64 *seqio, u64 *randio) | |
871 | { | |
872 | u64 v; | |
873 | ||
874 | *page = *seqio = *randio = 0; | |
875 | ||
876 | if (bps) | |
877 | *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC, | |
878 | DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE)); | |
879 | ||
880 | if (seqiops) { | |
881 | v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops); | |
882 | if (v > *page) | |
883 | *seqio = v - *page; | |
884 | } | |
885 | ||
886 | if (randiops) { | |
887 | v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops); | |
888 | if (v > *page) | |
889 | *randio = v - *page; | |
890 | } | |
891 | } | |
892 | ||
893 | static void ioc_refresh_lcoefs(struct ioc *ioc) | |
894 | { | |
895 | u64 *u = ioc->params.i_lcoefs; | |
896 | u64 *c = ioc->params.lcoefs; | |
897 | ||
898 | calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS], | |
899 | &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]); | |
900 | calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS], | |
901 | &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]); | |
902 | } | |
903 | ||
904 | static bool ioc_refresh_params(struct ioc *ioc, bool force) | |
905 | { | |
906 | const struct ioc_params *p; | |
907 | int idx; | |
908 | ||
909 | lockdep_assert_held(&ioc->lock); | |
910 | ||
911 | idx = ioc_autop_idx(ioc); | |
912 | p = &autop[idx]; | |
913 | ||
914 | if (idx == ioc->autop_idx && !force) | |
915 | return false; | |
916 | ||
917 | if (idx != ioc->autop_idx) | |
918 | atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); | |
919 | ||
920 | ioc->autop_idx = idx; | |
921 | ioc->autop_too_fast_at = 0; | |
922 | ioc->autop_too_slow_at = 0; | |
923 | ||
924 | if (!ioc->user_qos_params) | |
925 | memcpy(ioc->params.qos, p->qos, sizeof(p->qos)); | |
926 | if (!ioc->user_cost_model) | |
927 | memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs)); | |
928 | ||
929 | ioc_refresh_period_us(ioc); | |
930 | ioc_refresh_lcoefs(ioc); | |
931 | ||
932 | ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] * | |
933 | VTIME_PER_USEC, MILLION); | |
934 | ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] * | |
935 | VTIME_PER_USEC, MILLION); | |
936 | ||
937 | return true; | |
938 | } | |
939 | ||
ac33e91e TH |
940 | /* |
941 | * When an iocg accumulates too much vtime or gets deactivated, we throw away | |
942 | * some vtime, which lowers the overall device utilization. As the exact amount | |
943 | * which is being thrown away is known, we can compensate by accelerating the | |
944 | * vrate accordingly so that the extra vtime generated in the current period | |
945 | * matches what got lost. | |
946 | */ | |
947 | static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now) | |
948 | { | |
949 | s64 pleft = ioc->period_at + ioc->period_us - now->now; | |
950 | s64 vperiod = ioc->period_us * ioc->vtime_base_rate; | |
951 | s64 vcomp, vcomp_min, vcomp_max; | |
952 | ||
953 | lockdep_assert_held(&ioc->lock); | |
954 | ||
955 | /* we need some time left in this period */ | |
956 | if (pleft <= 0) | |
957 | goto done; | |
958 | ||
959 | /* | |
960 | * Calculate how much vrate should be adjusted to offset the error. | |
961 | * Limit the amount of adjustment and deduct the adjusted amount from | |
962 | * the error. | |
963 | */ | |
964 | vcomp = -div64_s64(ioc->vtime_err, pleft); | |
965 | vcomp_min = -(ioc->vtime_base_rate >> 1); | |
966 | vcomp_max = ioc->vtime_base_rate; | |
967 | vcomp = clamp(vcomp, vcomp_min, vcomp_max); | |
968 | ||
969 | ioc->vtime_err += vcomp * pleft; | |
970 | ||
971 | atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp); | |
972 | done: | |
973 | /* bound how much error can accumulate */ | |
974 | ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod); | |
975 | } | |
976 | ||
7caa4715 TH |
977 | /* take a snapshot of the current [v]time and vrate */ |
978 | static void ioc_now(struct ioc *ioc, struct ioc_now *now) | |
979 | { | |
980 | unsigned seq; | |
981 | ||
982 | now->now_ns = ktime_get(); | |
983 | now->now = ktime_to_us(now->now_ns); | |
984 | now->vrate = atomic64_read(&ioc->vtime_rate); | |
985 | ||
986 | /* | |
987 | * The current vtime is | |
988 | * | |
989 | * vtime at period start + (wallclock time since the start) * vrate | |
990 | * | |
991 | * As a consistent snapshot of `period_at_vtime` and `period_at` is | |
992 | * needed, they're seqcount protected. | |
993 | */ | |
994 | do { | |
995 | seq = read_seqcount_begin(&ioc->period_seqcount); | |
996 | now->vnow = ioc->period_at_vtime + | |
997 | (now->now - ioc->period_at) * now->vrate; | |
998 | } while (read_seqcount_retry(&ioc->period_seqcount, seq)); | |
999 | } | |
1000 | ||
1001 | static void ioc_start_period(struct ioc *ioc, struct ioc_now *now) | |
1002 | { | |
7caa4715 TH |
1003 | WARN_ON_ONCE(ioc->running != IOC_RUNNING); |
1004 | ||
1005 | write_seqcount_begin(&ioc->period_seqcount); | |
1006 | ioc->period_at = now->now; | |
1007 | ioc->period_at_vtime = now->vnow; | |
1008 | write_seqcount_end(&ioc->period_seqcount); | |
1009 | ||
1010 | ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us); | |
1011 | add_timer(&ioc->timer); | |
1012 | } | |
1013 | ||
1014 | /* | |
1015 | * Update @iocg's `active` and `inuse` to @active and @inuse, update level | |
b0853ab4 TH |
1016 | * weight sums and propagate upwards accordingly. If @save, the current margin |
1017 | * is saved to be used as reference for later inuse in-period adjustments. | |
7caa4715 | 1018 | */ |
b0853ab4 TH |
1019 | static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse, |
1020 | bool save, struct ioc_now *now) | |
7caa4715 TH |
1021 | { |
1022 | struct ioc *ioc = iocg->ioc; | |
1023 | int lvl; | |
1024 | ||
1025 | lockdep_assert_held(&ioc->lock); | |
1026 | ||
db84a72a TH |
1027 | inuse = clamp_t(u32, inuse, 1, active); |
1028 | ||
b0853ab4 TH |
1029 | iocg->last_inuse = iocg->inuse; |
1030 | if (save) | |
1031 | iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime); | |
1032 | ||
db84a72a TH |
1033 | if (active == iocg->active && inuse == iocg->inuse) |
1034 | return; | |
7caa4715 TH |
1035 | |
1036 | for (lvl = iocg->level - 1; lvl >= 0; lvl--) { | |
1037 | struct ioc_gq *parent = iocg->ancestors[lvl]; | |
1038 | struct ioc_gq *child = iocg->ancestors[lvl + 1]; | |
1039 | u32 parent_active = 0, parent_inuse = 0; | |
1040 | ||
1041 | /* update the level sums */ | |
1042 | parent->child_active_sum += (s32)(active - child->active); | |
1043 | parent->child_inuse_sum += (s32)(inuse - child->inuse); | |
1044 | /* apply the udpates */ | |
1045 | child->active = active; | |
1046 | child->inuse = inuse; | |
1047 | ||
1048 | /* | |
1049 | * The delta between inuse and active sums indicates that | |
1050 | * that much of weight is being given away. Parent's inuse | |
1051 | * and active should reflect the ratio. | |
1052 | */ | |
1053 | if (parent->child_active_sum) { | |
1054 | parent_active = parent->weight; | |
1055 | parent_inuse = DIV64_U64_ROUND_UP( | |
1056 | parent_active * parent->child_inuse_sum, | |
1057 | parent->child_active_sum); | |
1058 | } | |
1059 | ||
1060 | /* do we need to keep walking up? */ | |
1061 | if (parent_active == parent->active && | |
1062 | parent_inuse == parent->inuse) | |
1063 | break; | |
1064 | ||
1065 | active = parent_active; | |
1066 | inuse = parent_inuse; | |
1067 | } | |
1068 | ||
1069 | ioc->weights_updated = true; | |
1070 | } | |
1071 | ||
00410f1b | 1072 | static void commit_weights(struct ioc *ioc) |
7caa4715 TH |
1073 | { |
1074 | lockdep_assert_held(&ioc->lock); | |
1075 | ||
1076 | if (ioc->weights_updated) { | |
1077 | /* paired with rmb in current_hweight(), see there */ | |
1078 | smp_wmb(); | |
1079 | atomic_inc(&ioc->hweight_gen); | |
1080 | ioc->weights_updated = false; | |
1081 | } | |
1082 | } | |
1083 | ||
b0853ab4 TH |
1084 | static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse, |
1085 | bool save, struct ioc_now *now) | |
7caa4715 | 1086 | { |
b0853ab4 | 1087 | __propagate_weights(iocg, active, inuse, save, now); |
00410f1b | 1088 | commit_weights(iocg->ioc); |
7caa4715 TH |
1089 | } |
1090 | ||
1091 | static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep) | |
1092 | { | |
1093 | struct ioc *ioc = iocg->ioc; | |
1094 | int lvl; | |
1095 | u32 hwa, hwi; | |
1096 | int ioc_gen; | |
1097 | ||
1098 | /* hot path - if uptodate, use cached */ | |
1099 | ioc_gen = atomic_read(&ioc->hweight_gen); | |
1100 | if (ioc_gen == iocg->hweight_gen) | |
1101 | goto out; | |
1102 | ||
1103 | /* | |
00410f1b TH |
1104 | * Paired with wmb in commit_weights(). If we saw the updated |
1105 | * hweight_gen, all the weight updates from __propagate_weights() are | |
1106 | * visible too. | |
7caa4715 TH |
1107 | * |
1108 | * We can race with weight updates during calculation and get it | |
1109 | * wrong. However, hweight_gen would have changed and a future | |
1110 | * reader will recalculate and we're guaranteed to discard the | |
1111 | * wrong result soon. | |
1112 | */ | |
1113 | smp_rmb(); | |
1114 | ||
fe20cdb5 | 1115 | hwa = hwi = WEIGHT_ONE; |
7caa4715 TH |
1116 | for (lvl = 0; lvl <= iocg->level - 1; lvl++) { |
1117 | struct ioc_gq *parent = iocg->ancestors[lvl]; | |
1118 | struct ioc_gq *child = iocg->ancestors[lvl + 1]; | |
bd0adb91 TH |
1119 | u64 active_sum = READ_ONCE(parent->child_active_sum); |
1120 | u64 inuse_sum = READ_ONCE(parent->child_inuse_sum); | |
7caa4715 TH |
1121 | u32 active = READ_ONCE(child->active); |
1122 | u32 inuse = READ_ONCE(child->inuse); | |
1123 | ||
1124 | /* we can race with deactivations and either may read as zero */ | |
1125 | if (!active_sum || !inuse_sum) | |
1126 | continue; | |
1127 | ||
bd0adb91 TH |
1128 | active_sum = max_t(u64, active, active_sum); |
1129 | hwa = div64_u64((u64)hwa * active, active_sum); | |
7caa4715 | 1130 | |
bd0adb91 TH |
1131 | inuse_sum = max_t(u64, inuse, inuse_sum); |
1132 | hwi = div64_u64((u64)hwi * inuse, inuse_sum); | |
7caa4715 TH |
1133 | } |
1134 | ||
1135 | iocg->hweight_active = max_t(u32, hwa, 1); | |
1136 | iocg->hweight_inuse = max_t(u32, hwi, 1); | |
1137 | iocg->hweight_gen = ioc_gen; | |
1138 | out: | |
1139 | if (hw_activep) | |
1140 | *hw_activep = iocg->hweight_active; | |
1141 | if (hw_inusep) | |
1142 | *hw_inusep = iocg->hweight_inuse; | |
1143 | } | |
1144 | ||
93f7d2db TH |
1145 | /* |
1146 | * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the | |
1147 | * other weights stay unchanged. | |
1148 | */ | |
1149 | static u32 current_hweight_max(struct ioc_gq *iocg) | |
1150 | { | |
1151 | u32 hwm = WEIGHT_ONE; | |
1152 | u32 inuse = iocg->active; | |
1153 | u64 child_inuse_sum; | |
1154 | int lvl; | |
1155 | ||
1156 | lockdep_assert_held(&iocg->ioc->lock); | |
1157 | ||
1158 | for (lvl = iocg->level - 1; lvl >= 0; lvl--) { | |
1159 | struct ioc_gq *parent = iocg->ancestors[lvl]; | |
1160 | struct ioc_gq *child = iocg->ancestors[lvl + 1]; | |
1161 | ||
1162 | child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse; | |
1163 | hwm = div64_u64((u64)hwm * inuse, child_inuse_sum); | |
1164 | inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum, | |
1165 | parent->child_active_sum); | |
1166 | } | |
1167 | ||
1168 | return max_t(u32, hwm, 1); | |
1169 | } | |
1170 | ||
b0853ab4 | 1171 | static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now) |
7caa4715 TH |
1172 | { |
1173 | struct ioc *ioc = iocg->ioc; | |
1174 | struct blkcg_gq *blkg = iocg_to_blkg(iocg); | |
1175 | struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg); | |
1176 | u32 weight; | |
1177 | ||
1178 | lockdep_assert_held(&ioc->lock); | |
1179 | ||
1180 | weight = iocg->cfg_weight ?: iocc->dfl_weight; | |
1181 | if (weight != iocg->weight && iocg->active) | |
b0853ab4 | 1182 | propagate_weights(iocg, weight, iocg->inuse, true, now); |
7caa4715 TH |
1183 | iocg->weight = weight; |
1184 | } | |
1185 | ||
1186 | static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) | |
1187 | { | |
1188 | struct ioc *ioc = iocg->ioc; | |
ac33e91e TH |
1189 | u64 last_period, cur_period; |
1190 | u64 vtime, vtarget; | |
7caa4715 TH |
1191 | int i; |
1192 | ||
1193 | /* | |
1194 | * If seem to be already active, just update the stamp to tell the | |
1195 | * timer that we're still active. We don't mind occassional races. | |
1196 | */ | |
1197 | if (!list_empty(&iocg->active_list)) { | |
1198 | ioc_now(ioc, now); | |
1199 | cur_period = atomic64_read(&ioc->cur_period); | |
1200 | if (atomic64_read(&iocg->active_period) != cur_period) | |
1201 | atomic64_set(&iocg->active_period, cur_period); | |
1202 | return true; | |
1203 | } | |
1204 | ||
1205 | /* racy check on internal node IOs, treat as root level IOs */ | |
1206 | if (iocg->child_active_sum) | |
1207 | return false; | |
1208 | ||
1209 | spin_lock_irq(&ioc->lock); | |
1210 | ||
1211 | ioc_now(ioc, now); | |
1212 | ||
1213 | /* update period */ | |
1214 | cur_period = atomic64_read(&ioc->cur_period); | |
1215 | last_period = atomic64_read(&iocg->active_period); | |
1216 | atomic64_set(&iocg->active_period, cur_period); | |
1217 | ||
1218 | /* already activated or breaking leaf-only constraint? */ | |
8b37bc27 JX |
1219 | if (!list_empty(&iocg->active_list)) |
1220 | goto succeed_unlock; | |
1221 | for (i = iocg->level - 1; i > 0; i--) | |
1222 | if (!list_empty(&iocg->ancestors[i]->active_list)) | |
7caa4715 | 1223 | goto fail_unlock; |
8b37bc27 | 1224 | |
7caa4715 TH |
1225 | if (iocg->child_active_sum) |
1226 | goto fail_unlock; | |
1227 | ||
1228 | /* | |
ac33e91e TH |
1229 | * Always start with the target budget. On deactivation, we throw away |
1230 | * anything above it. | |
7caa4715 | 1231 | */ |
ac33e91e | 1232 | vtarget = now->vnow - ioc->margins.target; |
7caa4715 | 1233 | vtime = atomic64_read(&iocg->vtime); |
7caa4715 | 1234 | |
ac33e91e TH |
1235 | atomic64_add(vtarget - vtime, &iocg->vtime); |
1236 | atomic64_add(vtarget - vtime, &iocg->done_vtime); | |
1237 | vtime = vtarget; | |
7caa4715 TH |
1238 | |
1239 | /* | |
1240 | * Activate, propagate weight and start period timer if not | |
1241 | * running. Reset hweight_gen to avoid accidental match from | |
1242 | * wrapping. | |
1243 | */ | |
1244 | iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1; | |
1245 | list_add(&iocg->active_list, &ioc->active_iocgs); | |
b0853ab4 | 1246 | |
00410f1b | 1247 | propagate_weights(iocg, iocg->weight, |
b0853ab4 | 1248 | iocg->last_inuse ?: iocg->weight, true, now); |
7caa4715 TH |
1249 | |
1250 | TRACE_IOCG_PATH(iocg_activate, iocg, now, | |
1251 | last_period, cur_period, vtime); | |
1252 | ||
1aa50d02 | 1253 | iocg->activated_at = now->now; |
7caa4715 TH |
1254 | |
1255 | if (ioc->running == IOC_IDLE) { | |
1256 | ioc->running = IOC_RUNNING; | |
dda1315f | 1257 | ioc->debt_busy_at = now->now; |
7caa4715 TH |
1258 | ioc_start_period(ioc, now); |
1259 | } | |
1260 | ||
8b37bc27 | 1261 | succeed_unlock: |
7caa4715 TH |
1262 | spin_unlock_irq(&ioc->lock); |
1263 | return true; | |
1264 | ||
1265 | fail_unlock: | |
1266 | spin_unlock_irq(&ioc->lock); | |
1267 | return false; | |
1268 | } | |
1269 | ||
6ef20f78 TH |
1270 | static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) |
1271 | { | |
1272 | struct ioc *ioc = iocg->ioc; | |
1273 | struct blkcg_gq *blkg = iocg_to_blkg(iocg); | |
5160a5a5 TH |
1274 | u64 tdelta, delay, new_delay; |
1275 | s64 vover, vover_pct; | |
c421a3eb | 1276 | u32 hwa; |
6ef20f78 TH |
1277 | |
1278 | lockdep_assert_held(&iocg->waitq.lock); | |
1279 | ||
5160a5a5 TH |
1280 | /* calculate the current delay in effect - 1/2 every second */ |
1281 | tdelta = now->now - iocg->delay_at; | |
1282 | if (iocg->delay) | |
1283 | delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC); | |
1284 | else | |
1285 | delay = 0; | |
1286 | ||
1287 | /* calculate the new delay from the debt amount */ | |
c421a3eb | 1288 | current_hweight(iocg, &hwa, NULL); |
5160a5a5 TH |
1289 | vover = atomic64_read(&iocg->vtime) + |
1290 | abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow; | |
ac33e91e TH |
1291 | vover_pct = div64_s64(100 * vover, |
1292 | ioc->period_us * ioc->vtime_base_rate); | |
5160a5a5 TH |
1293 | |
1294 | if (vover_pct <= MIN_DELAY_THR_PCT) | |
1295 | new_delay = 0; | |
1296 | else if (vover_pct >= MAX_DELAY_THR_PCT) | |
1297 | new_delay = MAX_DELAY; | |
1298 | else | |
1299 | new_delay = MIN_DELAY + | |
1300 | div_u64((MAX_DELAY - MIN_DELAY) * | |
1301 | (vover_pct - MIN_DELAY_THR_PCT), | |
1302 | MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT); | |
1303 | ||
1304 | /* pick the higher one and apply */ | |
1305 | if (new_delay > delay) { | |
1306 | iocg->delay = new_delay; | |
1307 | iocg->delay_at = now->now; | |
1308 | delay = new_delay; | |
1309 | } | |
6ef20f78 | 1310 | |
5160a5a5 | 1311 | if (delay >= MIN_DELAY) { |
f0bf84a5 TH |
1312 | if (!iocg->indelay_since) |
1313 | iocg->indelay_since = now->now; | |
5160a5a5 TH |
1314 | blkcg_set_delay(blkg, delay * NSEC_PER_USEC); |
1315 | return true; | |
1316 | } else { | |
f0bf84a5 TH |
1317 | if (iocg->indelay_since) { |
1318 | iocg->local_stat.indelay_us += now->now - iocg->indelay_since; | |
1319 | iocg->indelay_since = 0; | |
1320 | } | |
5160a5a5 | 1321 | iocg->delay = 0; |
6ef20f78 TH |
1322 | blkcg_clear_delay(blkg); |
1323 | return false; | |
1324 | } | |
6ef20f78 TH |
1325 | } |
1326 | ||
c421a3eb TH |
1327 | static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost, |
1328 | struct ioc_now *now) | |
1329 | { | |
1330 | struct iocg_pcpu_stat *gcs; | |
1331 | ||
1332 | lockdep_assert_held(&iocg->ioc->lock); | |
1333 | lockdep_assert_held(&iocg->waitq.lock); | |
1334 | WARN_ON_ONCE(list_empty(&iocg->active_list)); | |
1335 | ||
1336 | /* | |
1337 | * Once in debt, debt handling owns inuse. @iocg stays at the minimum | |
1338 | * inuse donating all of it share to others until its debt is paid off. | |
1339 | */ | |
f0bf84a5 TH |
1340 | if (!iocg->abs_vdebt && abs_cost) { |
1341 | iocg->indebt_since = now->now; | |
c421a3eb | 1342 | propagate_weights(iocg, iocg->active, 0, false, now); |
f0bf84a5 | 1343 | } |
c421a3eb TH |
1344 | |
1345 | iocg->abs_vdebt += abs_cost; | |
1346 | ||
1347 | gcs = get_cpu_ptr(iocg->pcpu_stat); | |
1348 | local64_add(abs_cost, &gcs->abs_vusage); | |
1349 | put_cpu_ptr(gcs); | |
1350 | } | |
1351 | ||
1352 | static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay, | |
1353 | struct ioc_now *now) | |
1354 | { | |
1355 | lockdep_assert_held(&iocg->ioc->lock); | |
1356 | lockdep_assert_held(&iocg->waitq.lock); | |
1357 | ||
1358 | /* make sure that nobody messed with @iocg */ | |
1359 | WARN_ON_ONCE(list_empty(&iocg->active_list)); | |
1360 | WARN_ON_ONCE(iocg->inuse > 1); | |
1361 | ||
1362 | iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt); | |
1363 | ||
1364 | /* if debt is paid in full, restore inuse */ | |
f0bf84a5 TH |
1365 | if (!iocg->abs_vdebt) { |
1366 | iocg->local_stat.indebt_us += now->now - iocg->indebt_since; | |
1367 | iocg->indebt_since = 0; | |
1368 | ||
c421a3eb TH |
1369 | propagate_weights(iocg, iocg->active, iocg->last_inuse, |
1370 | false, now); | |
f0bf84a5 | 1371 | } |
c421a3eb TH |
1372 | } |
1373 | ||
7caa4715 TH |
1374 | static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode, |
1375 | int flags, void *key) | |
1376 | { | |
1377 | struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait); | |
1378 | struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key; | |
1379 | u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse); | |
1380 | ||
1381 | ctx->vbudget -= cost; | |
1382 | ||
1383 | if (ctx->vbudget < 0) | |
1384 | return -1; | |
1385 | ||
97eb1975 | 1386 | iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost); |
7caa4715 TH |
1387 | |
1388 | /* | |
1389 | * autoremove_wake_function() removes the wait entry only when it | |
1390 | * actually changed the task state. We want the wait always | |
1391 | * removed. Remove explicitly and use default_wake_function(). | |
1392 | */ | |
1393 | list_del_init(&wq_entry->entry); | |
1394 | wait->committed = true; | |
1395 | ||
1396 | default_wake_function(wq_entry, mode, flags, key); | |
1397 | return 0; | |
1398 | } | |
1399 | ||
da437b95 TH |
1400 | /* |
1401 | * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters | |
1402 | * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in | |
1403 | * addition to iocg->waitq.lock. | |
1404 | */ | |
1405 | static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt, | |
1406 | struct ioc_now *now) | |
7caa4715 TH |
1407 | { |
1408 | struct ioc *ioc = iocg->ioc; | |
1409 | struct iocg_wake_ctx ctx = { .iocg = iocg }; | |
da437b95 | 1410 | u64 vshortage, expires, oexpires; |
36a52481 | 1411 | s64 vbudget; |
c421a3eb | 1412 | u32 hwa; |
7caa4715 TH |
1413 | |
1414 | lockdep_assert_held(&iocg->waitq.lock); | |
1415 | ||
c421a3eb | 1416 | current_hweight(iocg, &hwa, NULL); |
36a52481 TH |
1417 | vbudget = now->vnow - atomic64_read(&iocg->vtime); |
1418 | ||
1419 | /* pay off debt */ | |
da437b95 | 1420 | if (pay_debt && iocg->abs_vdebt && vbudget > 0) { |
c421a3eb TH |
1421 | u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa); |
1422 | u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt); | |
1423 | u64 vpay = abs_cost_to_cost(abs_vpay, hwa); | |
36a52481 | 1424 | |
da437b95 TH |
1425 | lockdep_assert_held(&ioc->lock); |
1426 | ||
c421a3eb TH |
1427 | atomic64_add(vpay, &iocg->vtime); |
1428 | atomic64_add(vpay, &iocg->done_vtime); | |
1429 | iocg_pay_debt(iocg, abs_vpay, now); | |
1430 | vbudget -= vpay; | |
5160a5a5 | 1431 | } |
7b84b49e | 1432 | |
5160a5a5 | 1433 | if (iocg->abs_vdebt || iocg->delay) |
7b84b49e | 1434 | iocg_kick_delay(iocg, now); |
36a52481 | 1435 | |
da437b95 TH |
1436 | /* |
1437 | * Debt can still be outstanding if we haven't paid all yet or the | |
1438 | * caller raced and called without @pay_debt. Shouldn't wake up waiters | |
1439 | * under debt. Make sure @vbudget reflects the outstanding amount and is | |
1440 | * not positive. | |
1441 | */ | |
1442 | if (iocg->abs_vdebt) { | |
c421a3eb | 1443 | s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa); |
da437b95 TH |
1444 | vbudget = min_t(s64, 0, vbudget - vdebt); |
1445 | } | |
1446 | ||
7caa4715 | 1447 | /* |
c421a3eb TH |
1448 | * Wake up the ones which are due and see how much vtime we'll need for |
1449 | * the next one. As paying off debt restores hw_inuse, it must be read | |
1450 | * after the above debt payment. | |
7caa4715 | 1451 | */ |
da437b95 | 1452 | ctx.vbudget = vbudget; |
c421a3eb TH |
1453 | current_hweight(iocg, NULL, &ctx.hw_inuse); |
1454 | ||
7caa4715 | 1455 | __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx); |
c421a3eb | 1456 | |
f0bf84a5 TH |
1457 | if (!waitqueue_active(&iocg->waitq)) { |
1458 | if (iocg->wait_since) { | |
1459 | iocg->local_stat.wait_us += now->now - iocg->wait_since; | |
1460 | iocg->wait_since = 0; | |
1461 | } | |
7caa4715 | 1462 | return; |
f0bf84a5 TH |
1463 | } |
1464 | ||
1465 | if (!iocg->wait_since) | |
1466 | iocg->wait_since = now->now; | |
1467 | ||
7caa4715 TH |
1468 | if (WARN_ON_ONCE(ctx.vbudget >= 0)) |
1469 | return; | |
1470 | ||
7ca5b2e6 | 1471 | /* determine next wakeup, add a timer margin to guarantee chunking */ |
7caa4715 TH |
1472 | vshortage = -ctx.vbudget; |
1473 | expires = now->now_ns + | |
ac33e91e TH |
1474 | DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) * |
1475 | NSEC_PER_USEC; | |
7ca5b2e6 | 1476 | expires += ioc->timer_slack_ns; |
7caa4715 TH |
1477 | |
1478 | /* if already active and close enough, don't bother */ | |
1479 | oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer)); | |
1480 | if (hrtimer_is_queued(&iocg->waitq_timer) && | |
7ca5b2e6 | 1481 | abs(oexpires - expires) <= ioc->timer_slack_ns) |
7caa4715 TH |
1482 | return; |
1483 | ||
1484 | hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires), | |
7ca5b2e6 | 1485 | ioc->timer_slack_ns, HRTIMER_MODE_ABS); |
7caa4715 TH |
1486 | } |
1487 | ||
1488 | static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer) | |
1489 | { | |
1490 | struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer); | |
da437b95 | 1491 | bool pay_debt = READ_ONCE(iocg->abs_vdebt); |
7caa4715 TH |
1492 | struct ioc_now now; |
1493 | unsigned long flags; | |
1494 | ||
1495 | ioc_now(iocg->ioc, &now); | |
1496 | ||
da437b95 TH |
1497 | iocg_lock(iocg, pay_debt, &flags); |
1498 | iocg_kick_waitq(iocg, pay_debt, &now); | |
1499 | iocg_unlock(iocg, pay_debt, &flags); | |
7caa4715 TH |
1500 | |
1501 | return HRTIMER_NORESTART; | |
1502 | } | |
1503 | ||
7caa4715 TH |
1504 | static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p) |
1505 | { | |
1506 | u32 nr_met[2] = { }; | |
1507 | u32 nr_missed[2] = { }; | |
1508 | u64 rq_wait_ns = 0; | |
1509 | int cpu, rw; | |
1510 | ||
1511 | for_each_online_cpu(cpu) { | |
1512 | struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu); | |
1513 | u64 this_rq_wait_ns; | |
1514 | ||
1515 | for (rw = READ; rw <= WRITE; rw++) { | |
5e124f74 TH |
1516 | u32 this_met = local_read(&stat->missed[rw].nr_met); |
1517 | u32 this_missed = local_read(&stat->missed[rw].nr_missed); | |
7caa4715 TH |
1518 | |
1519 | nr_met[rw] += this_met - stat->missed[rw].last_met; | |
1520 | nr_missed[rw] += this_missed - stat->missed[rw].last_missed; | |
1521 | stat->missed[rw].last_met = this_met; | |
1522 | stat->missed[rw].last_missed = this_missed; | |
1523 | } | |
1524 | ||
5e124f74 | 1525 | this_rq_wait_ns = local64_read(&stat->rq_wait_ns); |
7caa4715 TH |
1526 | rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns; |
1527 | stat->last_rq_wait_ns = this_rq_wait_ns; | |
1528 | } | |
1529 | ||
1530 | for (rw = READ; rw <= WRITE; rw++) { | |
1531 | if (nr_met[rw] + nr_missed[rw]) | |
1532 | missed_ppm_ar[rw] = | |
1533 | DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION, | |
1534 | nr_met[rw] + nr_missed[rw]); | |
1535 | else | |
1536 | missed_ppm_ar[rw] = 0; | |
1537 | } | |
1538 | ||
1539 | *rq_wait_pct_p = div64_u64(rq_wait_ns * 100, | |
1540 | ioc->period_us * NSEC_PER_USEC); | |
1541 | } | |
1542 | ||
1543 | /* was iocg idle this period? */ | |
1544 | static bool iocg_is_idle(struct ioc_gq *iocg) | |
1545 | { | |
1546 | struct ioc *ioc = iocg->ioc; | |
1547 | ||
1548 | /* did something get issued this period? */ | |
1549 | if (atomic64_read(&iocg->active_period) == | |
1550 | atomic64_read(&ioc->cur_period)) | |
1551 | return false; | |
1552 | ||
1553 | /* is something in flight? */ | |
dcd6589b | 1554 | if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime)) |
7caa4715 TH |
1555 | return false; |
1556 | ||
1557 | return true; | |
1558 | } | |
1559 | ||
97eb1975 TH |
1560 | /* |
1561 | * Call this function on the target leaf @iocg's to build pre-order traversal | |
1562 | * list of all the ancestors in @inner_walk. The inner nodes are linked through | |
1563 | * ->walk_list and the caller is responsible for dissolving the list after use. | |
1564 | */ | |
1565 | static void iocg_build_inner_walk(struct ioc_gq *iocg, | |
1566 | struct list_head *inner_walk) | |
1567 | { | |
1568 | int lvl; | |
1569 | ||
1570 | WARN_ON_ONCE(!list_empty(&iocg->walk_list)); | |
1571 | ||
1572 | /* find the first ancestor which hasn't been visited yet */ | |
1573 | for (lvl = iocg->level - 1; lvl >= 0; lvl--) { | |
1574 | if (!list_empty(&iocg->ancestors[lvl]->walk_list)) | |
1575 | break; | |
1576 | } | |
1577 | ||
1578 | /* walk down and visit the inner nodes to get pre-order traversal */ | |
1579 | while (++lvl <= iocg->level - 1) { | |
1580 | struct ioc_gq *inner = iocg->ancestors[lvl]; | |
1581 | ||
1582 | /* record traversal order */ | |
1583 | list_add_tail(&inner->walk_list, inner_walk); | |
1584 | } | |
1585 | } | |
1586 | ||
1587 | /* collect per-cpu counters and propagate the deltas to the parent */ | |
1588 | static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now) | |
1589 | { | |
ac33e91e | 1590 | struct ioc *ioc = iocg->ioc; |
97eb1975 TH |
1591 | struct iocg_stat new_stat; |
1592 | u64 abs_vusage = 0; | |
1593 | u64 vusage_delta; | |
1594 | int cpu; | |
1595 | ||
1596 | lockdep_assert_held(&iocg->ioc->lock); | |
1597 | ||
1598 | /* collect per-cpu counters */ | |
1599 | for_each_possible_cpu(cpu) { | |
1600 | abs_vusage += local64_read( | |
1601 | per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu)); | |
1602 | } | |
1603 | vusage_delta = abs_vusage - iocg->last_stat_abs_vusage; | |
1604 | iocg->last_stat_abs_vusage = abs_vusage; | |
1605 | ||
ac33e91e | 1606 | iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate); |
1aa50d02 | 1607 | iocg->local_stat.usage_us += iocg->usage_delta_us; |
97eb1975 | 1608 | |
f0bf84a5 | 1609 | /* propagate upwards */ |
97eb1975 TH |
1610 | new_stat.usage_us = |
1611 | iocg->local_stat.usage_us + iocg->desc_stat.usage_us; | |
f0bf84a5 TH |
1612 | new_stat.wait_us = |
1613 | iocg->local_stat.wait_us + iocg->desc_stat.wait_us; | |
1614 | new_stat.indebt_us = | |
1615 | iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us; | |
1616 | new_stat.indelay_us = | |
1617 | iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us; | |
97eb1975 TH |
1618 | |
1619 | /* propagate the deltas to the parent */ | |
1620 | if (iocg->level > 0) { | |
1621 | struct iocg_stat *parent_stat = | |
1622 | &iocg->ancestors[iocg->level - 1]->desc_stat; | |
1623 | ||
1624 | parent_stat->usage_us += | |
1625 | new_stat.usage_us - iocg->last_stat.usage_us; | |
f0bf84a5 TH |
1626 | parent_stat->wait_us += |
1627 | new_stat.wait_us - iocg->last_stat.wait_us; | |
1628 | parent_stat->indebt_us += | |
1629 | new_stat.indebt_us - iocg->last_stat.indebt_us; | |
1630 | parent_stat->indelay_us += | |
1631 | new_stat.indelay_us - iocg->last_stat.indelay_us; | |
97eb1975 TH |
1632 | } |
1633 | ||
1634 | iocg->last_stat = new_stat; | |
1635 | } | |
1636 | ||
1637 | /* get stat counters ready for reading on all active iocgs */ | |
1638 | static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now) | |
1639 | { | |
1640 | LIST_HEAD(inner_walk); | |
1641 | struct ioc_gq *iocg, *tiocg; | |
1642 | ||
1643 | /* flush leaves and build inner node walk list */ | |
1644 | list_for_each_entry(iocg, target_iocgs, active_list) { | |
1645 | iocg_flush_stat_one(iocg, now); | |
1646 | iocg_build_inner_walk(iocg, &inner_walk); | |
1647 | } | |
1648 | ||
1649 | /* keep flushing upwards by walking the inner list backwards */ | |
1650 | list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) { | |
1651 | iocg_flush_stat_one(iocg, now); | |
1652 | list_del_init(&iocg->walk_list); | |
1653 | } | |
1654 | } | |
1655 | ||
93f7d2db TH |
1656 | /* |
1657 | * Determine what @iocg's hweight_inuse should be after donating unused | |
1658 | * capacity. @hwm is the upper bound and used to signal no donation. This | |
1659 | * function also throws away @iocg's excess budget. | |
1660 | */ | |
ac33e91e TH |
1661 | static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm, |
1662 | u32 usage, struct ioc_now *now) | |
7caa4715 | 1663 | { |
93f7d2db TH |
1664 | struct ioc *ioc = iocg->ioc; |
1665 | u64 vtime = atomic64_read(&iocg->vtime); | |
f1de2439 | 1666 | s64 excess, delta, target, new_hwi; |
93f7d2db | 1667 | |
c421a3eb TH |
1668 | /* debt handling owns inuse for debtors */ |
1669 | if (iocg->abs_vdebt) | |
1670 | return 1; | |
1671 | ||
93f7d2db TH |
1672 | /* see whether minimum margin requirement is met */ |
1673 | if (waitqueue_active(&iocg->waitq) || | |
1674 | time_after64(vtime, now->vnow - ioc->margins.min)) | |
1675 | return hwm; | |
1676 | ||
ac33e91e TH |
1677 | /* throw away excess above target */ |
1678 | excess = now->vnow - vtime - ioc->margins.target; | |
93f7d2db TH |
1679 | if (excess > 0) { |
1680 | atomic64_add(excess, &iocg->vtime); | |
1681 | atomic64_add(excess, &iocg->done_vtime); | |
1682 | vtime += excess; | |
ac33e91e | 1683 | ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE); |
93f7d2db TH |
1684 | } |
1685 | ||
f1de2439 TH |
1686 | /* |
1687 | * Let's say the distance between iocg's and device's vtimes as a | |
1688 | * fraction of period duration is delta. Assuming that the iocg will | |
1689 | * consume the usage determined above, we want to determine new_hwi so | |
1690 | * that delta equals MARGIN_TARGET at the end of the next period. | |
1691 | * | |
1692 | * We need to execute usage worth of IOs while spending the sum of the | |
1693 | * new budget (1 - MARGIN_TARGET) and the leftover from the last period | |
1694 | * (delta): | |
1695 | * | |
1696 | * usage = (1 - MARGIN_TARGET + delta) * new_hwi | |
1697 | * | |
1698 | * Therefore, the new_hwi is: | |
1699 | * | |
1700 | * new_hwi = usage / (1 - MARGIN_TARGET + delta) | |
1701 | */ | |
1702 | delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime), | |
1703 | now->vnow - ioc->period_at_vtime); | |
1704 | target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100; | |
1705 | new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta); | |
7caa4715 | 1706 | |
f1de2439 | 1707 | return clamp_t(s64, new_hwi, 1, hwm); |
7caa4715 TH |
1708 | } |
1709 | ||
e08d02aa TH |
1710 | /* |
1711 | * For work-conservation, an iocg which isn't using all of its share should | |
1712 | * donate the leftover to other iocgs. There are two ways to achieve this - 1. | |
1713 | * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight. | |
1714 | * | |
1715 | * #1 is mathematically simpler but has the drawback of requiring synchronous | |
1716 | * global hweight_inuse updates when idle iocg's get activated or inuse weights | |
1717 | * change due to donation snapbacks as it has the possibility of grossly | |
1718 | * overshooting what's allowed by the model and vrate. | |
1719 | * | |
1720 | * #2 is inherently safe with local operations. The donating iocg can easily | |
1721 | * snap back to higher weights when needed without worrying about impacts on | |
1722 | * other nodes as the impacts will be inherently correct. This also makes idle | |
1723 | * iocg activations safe. The only effect activations have is decreasing | |
1724 | * hweight_inuse of others, the right solution to which is for those iocgs to | |
1725 | * snap back to higher weights. | |
1726 | * | |
1727 | * So, we go with #2. The challenge is calculating how each donating iocg's | |
1728 | * inuse should be adjusted to achieve the target donation amounts. This is done | |
1729 | * using Andy's method described in the following pdf. | |
1730 | * | |
1731 | * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo | |
1732 | * | |
1733 | * Given the weights and target after-donation hweight_inuse values, Andy's | |
1734 | * method determines how the proportional distribution should look like at each | |
1735 | * sibling level to maintain the relative relationship between all non-donating | |
1736 | * pairs. To roughly summarize, it divides the tree into donating and | |
1737 | * non-donating parts, calculates global donation rate which is used to | |
1738 | * determine the target hweight_inuse for each node, and then derives per-level | |
1739 | * proportions. | |
1740 | * | |
1741 | * The following pdf shows that global distribution calculated this way can be | |
1742 | * achieved by scaling inuse weights of donating leaves and propagating the | |
1743 | * adjustments upwards proportionally. | |
1744 | * | |
1745 | * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE | |
1746 | * | |
1747 | * Combining the above two, we can determine how each leaf iocg's inuse should | |
1748 | * be adjusted to achieve the target donation. | |
1749 | * | |
1750 | * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN | |
1751 | * | |
1752 | * The inline comments use symbols from the last pdf. | |
1753 | * | |
1754 | * b is the sum of the absolute budgets in the subtree. 1 for the root node. | |
1755 | * f is the sum of the absolute budgets of non-donating nodes in the subtree. | |
1756 | * t is the sum of the absolute budgets of donating nodes in the subtree. | |
1757 | * w is the weight of the node. w = w_f + w_t | |
1758 | * w_f is the non-donating portion of w. w_f = w * f / b | |
1759 | * w_b is the donating portion of w. w_t = w * t / b | |
1760 | * s is the sum of all sibling weights. s = Sum(w) for siblings | |
1761 | * s_f and s_t are the non-donating and donating portions of s. | |
1762 | * | |
1763 | * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g. | |
1764 | * w_pt is the donating portion of the parent's weight and w'_pt the same value | |
1765 | * after adjustments. Subscript r denotes the root node's values. | |
1766 | */ | |
93f7d2db TH |
1767 | static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now) |
1768 | { | |
e08d02aa TH |
1769 | LIST_HEAD(over_hwa); |
1770 | LIST_HEAD(inner_walk); | |
1771 | struct ioc_gq *iocg, *tiocg, *root_iocg; | |
1772 | u32 after_sum, over_sum, over_target, gamma; | |
93f7d2db | 1773 | |
e08d02aa TH |
1774 | /* |
1775 | * It's pretty unlikely but possible for the total sum of | |
1776 | * hweight_after_donation's to be higher than WEIGHT_ONE, which will | |
1777 | * confuse the following calculations. If such condition is detected, | |
1778 | * scale down everyone over its full share equally to keep the sum below | |
1779 | * WEIGHT_ONE. | |
1780 | */ | |
1781 | after_sum = 0; | |
1782 | over_sum = 0; | |
93f7d2db | 1783 | list_for_each_entry(iocg, surpluses, surplus_list) { |
e08d02aa | 1784 | u32 hwa; |
93f7d2db | 1785 | |
e08d02aa TH |
1786 | current_hweight(iocg, &hwa, NULL); |
1787 | after_sum += iocg->hweight_after_donation; | |
93f7d2db | 1788 | |
e08d02aa TH |
1789 | if (iocg->hweight_after_donation > hwa) { |
1790 | over_sum += iocg->hweight_after_donation; | |
1791 | list_add(&iocg->walk_list, &over_hwa); | |
1792 | } | |
93f7d2db | 1793 | } |
e08d02aa TH |
1794 | |
1795 | if (after_sum >= WEIGHT_ONE) { | |
1796 | /* | |
1797 | * The delta should be deducted from the over_sum, calculate | |
1798 | * target over_sum value. | |
1799 | */ | |
1800 | u32 over_delta = after_sum - (WEIGHT_ONE - 1); | |
1801 | WARN_ON_ONCE(over_sum <= over_delta); | |
1802 | over_target = over_sum - over_delta; | |
1803 | } else { | |
1804 | over_target = 0; | |
1805 | } | |
1806 | ||
1807 | list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) { | |
1808 | if (over_target) | |
1809 | iocg->hweight_after_donation = | |
1810 | div_u64((u64)iocg->hweight_after_donation * | |
1811 | over_target, over_sum); | |
1812 | list_del_init(&iocg->walk_list); | |
1813 | } | |
1814 | ||
1815 | /* | |
1816 | * Build pre-order inner node walk list and prepare for donation | |
1817 | * adjustment calculations. | |
1818 | */ | |
1819 | list_for_each_entry(iocg, surpluses, surplus_list) { | |
1820 | iocg_build_inner_walk(iocg, &inner_walk); | |
1821 | } | |
1822 | ||
1823 | root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list); | |
1824 | WARN_ON_ONCE(root_iocg->level > 0); | |
1825 | ||
1826 | list_for_each_entry(iocg, &inner_walk, walk_list) { | |
1827 | iocg->child_adjusted_sum = 0; | |
1828 | iocg->hweight_donating = 0; | |
1829 | iocg->hweight_after_donation = 0; | |
1830 | } | |
1831 | ||
1832 | /* | |
1833 | * Propagate the donating budget (b_t) and after donation budget (b'_t) | |
1834 | * up the hierarchy. | |
1835 | */ | |
1836 | list_for_each_entry(iocg, surpluses, surplus_list) { | |
1837 | struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; | |
1838 | ||
1839 | parent->hweight_donating += iocg->hweight_donating; | |
1840 | parent->hweight_after_donation += iocg->hweight_after_donation; | |
1841 | } | |
1842 | ||
1843 | list_for_each_entry_reverse(iocg, &inner_walk, walk_list) { | |
1844 | if (iocg->level > 0) { | |
1845 | struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; | |
1846 | ||
1847 | parent->hweight_donating += iocg->hweight_donating; | |
1848 | parent->hweight_after_donation += iocg->hweight_after_donation; | |
1849 | } | |
1850 | } | |
1851 | ||
1852 | /* | |
1853 | * Calculate inner hwa's (b) and make sure the donation values are | |
1854 | * within the accepted ranges as we're doing low res calculations with | |
1855 | * roundups. | |
1856 | */ | |
1857 | list_for_each_entry(iocg, &inner_walk, walk_list) { | |
1858 | if (iocg->level) { | |
1859 | struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; | |
1860 | ||
1861 | iocg->hweight_active = DIV64_U64_ROUND_UP( | |
1862 | (u64)parent->hweight_active * iocg->active, | |
1863 | parent->child_active_sum); | |
1864 | ||
1865 | } | |
1866 | ||
1867 | iocg->hweight_donating = min(iocg->hweight_donating, | |
1868 | iocg->hweight_active); | |
1869 | iocg->hweight_after_donation = min(iocg->hweight_after_donation, | |
1870 | iocg->hweight_donating - 1); | |
1871 | if (WARN_ON_ONCE(iocg->hweight_active <= 1 || | |
1872 | iocg->hweight_donating <= 1 || | |
1873 | iocg->hweight_after_donation == 0)) { | |
1874 | pr_warn("iocg: invalid donation weights in "); | |
1875 | pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup); | |
1876 | pr_cont(": active=%u donating=%u after=%u\n", | |
1877 | iocg->hweight_active, iocg->hweight_donating, | |
1878 | iocg->hweight_after_donation); | |
1879 | } | |
1880 | } | |
1881 | ||
1882 | /* | |
1883 | * Calculate the global donation rate (gamma) - the rate to adjust | |
769b628d TH |
1884 | * non-donating budgets by. |
1885 | * | |
1886 | * No need to use 64bit multiplication here as the first operand is | |
1887 | * guaranteed to be smaller than WEIGHT_ONE (1<<16). | |
1888 | * | |
1889 | * We know that there are beneficiary nodes and the sum of the donating | |
1890 | * hweights can't be whole; however, due to the round-ups during hweight | |
1891 | * calculations, root_iocg->hweight_donating might still end up equal to | |
1892 | * or greater than whole. Limit the range when calculating the divider. | |
e08d02aa TH |
1893 | * |
1894 | * gamma = (1 - t_r') / (1 - t_r) | |
1895 | */ | |
1896 | gamma = DIV_ROUND_UP( | |
1897 | (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE, | |
769b628d | 1898 | WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1)); |
e08d02aa TH |
1899 | |
1900 | /* | |
1901 | * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner | |
1902 | * nodes. | |
1903 | */ | |
1904 | list_for_each_entry(iocg, &inner_walk, walk_list) { | |
1905 | struct ioc_gq *parent; | |
1906 | u32 inuse, wpt, wptp; | |
1907 | u64 st, sf; | |
1908 | ||
1909 | if (iocg->level == 0) { | |
1910 | /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */ | |
1911 | iocg->child_adjusted_sum = DIV64_U64_ROUND_UP( | |
1912 | iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating), | |
1913 | WEIGHT_ONE - iocg->hweight_after_donation); | |
1914 | continue; | |
1915 | } | |
1916 | ||
1917 | parent = iocg->ancestors[iocg->level - 1]; | |
1918 | ||
1919 | /* b' = gamma * b_f + b_t' */ | |
1920 | iocg->hweight_inuse = DIV64_U64_ROUND_UP( | |
1921 | (u64)gamma * (iocg->hweight_active - iocg->hweight_donating), | |
1922 | WEIGHT_ONE) + iocg->hweight_after_donation; | |
1923 | ||
1924 | /* w' = s' * b' / b'_p */ | |
1925 | inuse = DIV64_U64_ROUND_UP( | |
1926 | (u64)parent->child_adjusted_sum * iocg->hweight_inuse, | |
1927 | parent->hweight_inuse); | |
1928 | ||
1929 | /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */ | |
1930 | st = DIV64_U64_ROUND_UP( | |
1931 | iocg->child_active_sum * iocg->hweight_donating, | |
1932 | iocg->hweight_active); | |
1933 | sf = iocg->child_active_sum - st; | |
1934 | wpt = DIV64_U64_ROUND_UP( | |
1935 | (u64)iocg->active * iocg->hweight_donating, | |
1936 | iocg->hweight_active); | |
1937 | wptp = DIV64_U64_ROUND_UP( | |
1938 | (u64)inuse * iocg->hweight_after_donation, | |
1939 | iocg->hweight_inuse); | |
1940 | ||
1941 | iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt); | |
1942 | } | |
1943 | ||
1944 | /* | |
1945 | * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and | |
1946 | * we can finally determine leaf adjustments. | |
1947 | */ | |
1948 | list_for_each_entry(iocg, surpluses, surplus_list) { | |
1949 | struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; | |
1950 | u32 inuse; | |
1951 | ||
c421a3eb TH |
1952 | /* |
1953 | * In-debt iocgs participated in the donation calculation with | |
1954 | * the minimum target hweight_inuse. Configuring inuse | |
1955 | * accordingly would work fine but debt handling expects | |
1956 | * @iocg->inuse stay at the minimum and we don't wanna | |
1957 | * interfere. | |
1958 | */ | |
1959 | if (iocg->abs_vdebt) { | |
1960 | WARN_ON_ONCE(iocg->inuse > 1); | |
1961 | continue; | |
1962 | } | |
1963 | ||
e08d02aa TH |
1964 | /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */ |
1965 | inuse = DIV64_U64_ROUND_UP( | |
1966 | parent->child_adjusted_sum * iocg->hweight_after_donation, | |
1967 | parent->hweight_inuse); | |
04603755 TH |
1968 | |
1969 | TRACE_IOCG_PATH(inuse_transfer, iocg, now, | |
1970 | iocg->inuse, inuse, | |
1971 | iocg->hweight_inuse, | |
1972 | iocg->hweight_after_donation); | |
1973 | ||
b0853ab4 | 1974 | __propagate_weights(iocg, iocg->active, inuse, true, now); |
e08d02aa TH |
1975 | } |
1976 | ||
1977 | /* walk list should be dissolved after use */ | |
1978 | list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list) | |
1979 | list_del_init(&iocg->walk_list); | |
93f7d2db TH |
1980 | } |
1981 | ||
7caa4715 TH |
1982 | static void ioc_timer_fn(struct timer_list *timer) |
1983 | { | |
1984 | struct ioc *ioc = container_of(timer, struct ioc, timer); | |
1985 | struct ioc_gq *iocg, *tiocg; | |
1986 | struct ioc_now now; | |
8692d2db | 1987 | LIST_HEAD(surpluses); |
dda1315f TH |
1988 | int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0; |
1989 | u64 usage_us_sum = 0; | |
7caa4715 TH |
1990 | u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM]; |
1991 | u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; | |
1992 | u32 missed_ppm[2], rq_wait_pct; | |
1993 | u64 period_vtime; | |
f1de2439 | 1994 | int prev_busy_level; |
7caa4715 TH |
1995 | |
1996 | /* how were the latencies during the period? */ | |
1997 | ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct); | |
1998 | ||
1999 | /* take care of active iocgs */ | |
2000 | spin_lock_irq(&ioc->lock); | |
2001 | ||
2002 | ioc_now(ioc, &now); | |
2003 | ||
2004 | period_vtime = now.vnow - ioc->period_at_vtime; | |
2005 | if (WARN_ON_ONCE(!period_vtime)) { | |
2006 | spin_unlock_irq(&ioc->lock); | |
2007 | return; | |
2008 | } | |
2009 | ||
2010 | /* | |
2011 | * Waiters determine the sleep durations based on the vrate they | |
2012 | * saw at the time of sleep. If vrate has increased, some waiters | |
2013 | * could be sleeping for too long. Wake up tardy waiters which | |
2014 | * should have woken up in the last period and expire idle iocgs. | |
2015 | */ | |
2016 | list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { | |
d9012a59 | 2017 | if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && |
5160a5a5 | 2018 | !iocg->delay && !iocg_is_idle(iocg)) |
7caa4715 TH |
2019 | continue; |
2020 | ||
2021 | spin_lock(&iocg->waitq.lock); | |
2022 | ||
f0bf84a5 TH |
2023 | /* flush wait and indebt stat deltas */ |
2024 | if (iocg->wait_since) { | |
2025 | iocg->local_stat.wait_us += now.now - iocg->wait_since; | |
2026 | iocg->wait_since = now.now; | |
2027 | } | |
2028 | if (iocg->indebt_since) { | |
2029 | iocg->local_stat.indebt_us += | |
2030 | now.now - iocg->indebt_since; | |
2031 | iocg->indebt_since = now.now; | |
2032 | } | |
2033 | if (iocg->indelay_since) { | |
2034 | iocg->local_stat.indelay_us += | |
2035 | now.now - iocg->indelay_since; | |
2036 | iocg->indelay_since = now.now; | |
2037 | } | |
2038 | ||
5160a5a5 TH |
2039 | if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt || |
2040 | iocg->delay) { | |
7caa4715 | 2041 | /* might be oversleeping vtime / hweight changes, kick */ |
da437b95 | 2042 | iocg_kick_waitq(iocg, true, &now); |
dda1315f TH |
2043 | if (iocg->abs_vdebt) |
2044 | nr_debtors++; | |
7caa4715 TH |
2045 | } else if (iocg_is_idle(iocg)) { |
2046 | /* no waiter and idle, deactivate */ | |
ac33e91e TH |
2047 | u64 vtime = atomic64_read(&iocg->vtime); |
2048 | s64 excess; | |
2049 | ||
2050 | /* | |
2051 | * @iocg has been inactive for a full duration and will | |
2052 | * have a high budget. Account anything above target as | |
2053 | * error and throw away. On reactivation, it'll start | |
2054 | * with the target budget. | |
2055 | */ | |
2056 | excess = now.vnow - vtime - ioc->margins.target; | |
2057 | if (excess > 0) { | |
2058 | u32 old_hwi; | |
2059 | ||
2060 | current_hweight(iocg, NULL, &old_hwi); | |
2061 | ioc->vtime_err -= div64_u64(excess * old_hwi, | |
2062 | WEIGHT_ONE); | |
2063 | } | |
2064 | ||
b0853ab4 | 2065 | __propagate_weights(iocg, 0, 0, false, &now); |
7caa4715 TH |
2066 | list_del_init(&iocg->active_list); |
2067 | } | |
2068 | ||
2069 | spin_unlock(&iocg->waitq.lock); | |
2070 | } | |
00410f1b | 2071 | commit_weights(ioc); |
7caa4715 | 2072 | |
f0bf84a5 TH |
2073 | /* |
2074 | * Wait and indebt stat are flushed above and the donation calculation | |
2075 | * below needs updated usage stat. Let's bring stat up-to-date. | |
2076 | */ | |
2077 | iocg_flush_stat(&ioc->active_iocgs, &now); | |
2078 | ||
f1de2439 | 2079 | /* calc usage and see whether some weights need to be moved around */ |
7caa4715 | 2080 | list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { |
f1de2439 TH |
2081 | u64 vdone, vtime, usage_us, usage_dur; |
2082 | u32 usage, hw_active, hw_inuse; | |
7caa4715 TH |
2083 | |
2084 | /* | |
2085 | * Collect unused and wind vtime closer to vnow to prevent | |
2086 | * iocgs from accumulating a large amount of budget. | |
2087 | */ | |
2088 | vdone = atomic64_read(&iocg->done_vtime); | |
2089 | vtime = atomic64_read(&iocg->vtime); | |
2090 | current_hweight(iocg, &hw_active, &hw_inuse); | |
2091 | ||
2092 | /* | |
2093 | * Latency QoS detection doesn't account for IOs which are | |
2094 | * in-flight for longer than a period. Detect them by | |
2095 | * comparing vdone against period start. If lagging behind | |
2096 | * IOs from past periods, don't increase vrate. | |
2097 | */ | |
7cd806a9 TH |
2098 | if ((ppm_rthr != MILLION || ppm_wthr != MILLION) && |
2099 | !atomic_read(&iocg_to_blkg(iocg)->use_delay) && | |
7caa4715 TH |
2100 | time_after64(vtime, vdone) && |
2101 | time_after64(vtime, now.vnow - | |
2102 | MAX_LAGGING_PERIODS * period_vtime) && | |
2103 | time_before64(vdone, now.vnow - period_vtime)) | |
2104 | nr_lagging++; | |
2105 | ||
7caa4715 | 2106 | /* |
f1de2439 TH |
2107 | * Determine absolute usage factoring in in-flight IOs to avoid |
2108 | * high-latency completions appearing as idle. | |
7caa4715 | 2109 | */ |
1aa50d02 | 2110 | usage_us = iocg->usage_delta_us; |
dda1315f | 2111 | usage_us_sum += usage_us; |
f1de2439 | 2112 | |
1aa50d02 TH |
2113 | if (vdone != vtime) { |
2114 | u64 inflight_us = DIV64_U64_ROUND_UP( | |
2115 | cost_to_abs_cost(vtime - vdone, hw_inuse), | |
ac33e91e | 2116 | ioc->vtime_base_rate); |
1aa50d02 TH |
2117 | usage_us = max(usage_us, inflight_us); |
2118 | } | |
2119 | ||
f1de2439 TH |
2120 | /* convert to hweight based usage ratio */ |
2121 | if (time_after64(iocg->activated_at, ioc->period_at)) | |
2122 | usage_dur = max_t(u64, now.now - iocg->activated_at, 1); | |
2123 | else | |
2124 | usage_dur = max_t(u64, now.now - ioc->period_at, 1); | |
93f7d2db | 2125 | |
f1de2439 TH |
2126 | usage = clamp_t(u32, |
2127 | DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE, | |
2128 | usage_dur), | |
1aa50d02 | 2129 | 1, WEIGHT_ONE); |
7caa4715 TH |
2130 | |
2131 | /* see whether there's surplus vtime */ | |
8692d2db | 2132 | WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); |
93f7d2db TH |
2133 | if (hw_inuse < hw_active || |
2134 | (!waitqueue_active(&iocg->waitq) && | |
f1de2439 | 2135 | time_before64(vtime, now.vnow - ioc->margins.low))) { |
ac33e91e | 2136 | u32 hwa, old_hwi, hwm, new_hwi; |
93f7d2db TH |
2137 | |
2138 | /* | |
2139 | * Already donating or accumulated enough to start. | |
2140 | * Determine the donation amount. | |
2141 | */ | |
ac33e91e | 2142 | current_hweight(iocg, &hwa, &old_hwi); |
93f7d2db | 2143 | hwm = current_hweight_max(iocg); |
ac33e91e TH |
2144 | new_hwi = hweight_after_donation(iocg, old_hwi, hwm, |
2145 | usage, &now); | |
93f7d2db | 2146 | if (new_hwi < hwm) { |
e08d02aa | 2147 | iocg->hweight_donating = hwa; |
93f7d2db | 2148 | iocg->hweight_after_donation = new_hwi; |
8692d2db | 2149 | list_add(&iocg->surplus_list, &surpluses); |
7caa4715 | 2150 | } else { |
04603755 TH |
2151 | TRACE_IOCG_PATH(inuse_shortage, iocg, &now, |
2152 | iocg->inuse, iocg->active, | |
2153 | iocg->hweight_inuse, new_hwi); | |
2154 | ||
93f7d2db | 2155 | __propagate_weights(iocg, iocg->active, |
b0853ab4 | 2156 | iocg->active, true, &now); |
93f7d2db | 2157 | nr_shortages++; |
7caa4715 TH |
2158 | } |
2159 | } else { | |
93f7d2db | 2160 | /* genuinely short on vtime */ |
7caa4715 TH |
2161 | nr_shortages++; |
2162 | } | |
2163 | } | |
2164 | ||
93f7d2db TH |
2165 | if (!list_empty(&surpluses) && nr_shortages) |
2166 | transfer_surpluses(&surpluses, &now); | |
7caa4715 | 2167 | |
00410f1b | 2168 | commit_weights(ioc); |
7caa4715 | 2169 | |
8692d2db TH |
2170 | /* surplus list should be dissolved after use */ |
2171 | list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list) | |
2172 | list_del_init(&iocg->surplus_list); | |
2173 | ||
dda1315f TH |
2174 | /* |
2175 | * A low weight iocg can amass a large amount of debt, for example, when | |
2176 | * anonymous memory gets reclaimed aggressively. If the system has a lot | |
2177 | * of memory paired with a slow IO device, the debt can span multiple | |
2178 | * seconds or more. If there are no other subsequent IO issuers, the | |
2179 | * in-debt iocg may end up blocked paying its debt while the IO device | |
2180 | * is idle. | |
2181 | * | |
2182 | * The following protects against such pathological cases. If the device | |
2183 | * has been sufficiently idle for a substantial amount of time, the | |
2184 | * debts are halved. The criteria are on the conservative side as we | |
2185 | * want to resolve the rare extreme cases without impacting regular | |
2186 | * operation by forgiving debts too readily. | |
2187 | */ | |
2188 | if (nr_shortages || | |
2189 | div64_u64(100 * usage_us_sum, now.now - ioc->period_at) >= | |
2190 | DEBT_BUSY_USAGE_PCT) | |
2191 | ioc->debt_busy_at = now.now; | |
2192 | ||
2193 | if (nr_debtors && | |
2194 | now.now - ioc->debt_busy_at >= DEBT_REDUCTION_IDLE_DUR) { | |
2195 | list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { | |
2196 | if (iocg->abs_vdebt) { | |
2197 | spin_lock(&iocg->waitq.lock); | |
2198 | iocg->abs_vdebt /= 2; | |
2199 | iocg_kick_waitq(iocg, true, &now); | |
2200 | spin_unlock(&iocg->waitq.lock); | |
2201 | } | |
2202 | } | |
2203 | ioc->debt_busy_at = now.now; | |
2204 | } | |
2205 | ||
7caa4715 TH |
2206 | /* |
2207 | * If q is getting clogged or we're missing too much, we're issuing | |
2208 | * too much IO and should lower vtime rate. If we're not missing | |
2209 | * and experiencing shortages but not surpluses, we're too stingy | |
2210 | * and should increase vtime rate. | |
2211 | */ | |
25d41e4a | 2212 | prev_busy_level = ioc->busy_level; |
7caa4715 TH |
2213 | if (rq_wait_pct > RQ_WAIT_BUSY_PCT || |
2214 | missed_ppm[READ] > ppm_rthr || | |
2215 | missed_ppm[WRITE] > ppm_wthr) { | |
81ca627a | 2216 | /* clearly missing QoS targets, slow down vrate */ |
7caa4715 TH |
2217 | ioc->busy_level = max(ioc->busy_level, 0); |
2218 | ioc->busy_level++; | |
7cd806a9 | 2219 | } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 && |
7caa4715 TH |
2220 | missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 && |
2221 | missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) { | |
81ca627a TH |
2222 | /* QoS targets are being met with >25% margin */ |
2223 | if (nr_shortages) { | |
2224 | /* | |
2225 | * We're throttling while the device has spare | |
2226 | * capacity. If vrate was being slowed down, stop. | |
2227 | */ | |
7cd806a9 | 2228 | ioc->busy_level = min(ioc->busy_level, 0); |
81ca627a TH |
2229 | |
2230 | /* | |
2231 | * If there are IOs spanning multiple periods, wait | |
065655c8 | 2232 | * them out before pushing the device harder. |
81ca627a | 2233 | */ |
065655c8 | 2234 | if (!nr_lagging) |
7cd806a9 | 2235 | ioc->busy_level--; |
81ca627a TH |
2236 | } else { |
2237 | /* | |
2238 | * Nobody is being throttled and the users aren't | |
2239 | * issuing enough IOs to saturate the device. We | |
2240 | * simply don't know how close the device is to | |
2241 | * saturation. Coast. | |
2242 | */ | |
2243 | ioc->busy_level = 0; | |
7cd806a9 | 2244 | } |
7caa4715 | 2245 | } else { |
81ca627a | 2246 | /* inside the hysterisis margin, we're good */ |
7caa4715 TH |
2247 | ioc->busy_level = 0; |
2248 | } | |
2249 | ||
2250 | ioc->busy_level = clamp(ioc->busy_level, -1000, 1000); | |
2251 | ||
7cd806a9 | 2252 | if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) { |
ac33e91e | 2253 | u64 vrate = ioc->vtime_base_rate; |
7caa4715 TH |
2254 | u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max; |
2255 | ||
2256 | /* rq_wait signal is always reliable, ignore user vrate_min */ | |
2257 | if (rq_wait_pct > RQ_WAIT_BUSY_PCT) | |
2258 | vrate_min = VRATE_MIN; | |
2259 | ||
2260 | /* | |
2261 | * If vrate is out of bounds, apply clamp gradually as the | |
2262 | * bounds can change abruptly. Otherwise, apply busy_level | |
2263 | * based adjustment. | |
2264 | */ | |
2265 | if (vrate < vrate_min) { | |
2266 | vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), | |
2267 | 100); | |
2268 | vrate = min(vrate, vrate_min); | |
2269 | } else if (vrate > vrate_max) { | |
2270 | vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), | |
2271 | 100); | |
2272 | vrate = max(vrate, vrate_max); | |
2273 | } else { | |
2274 | int idx = min_t(int, abs(ioc->busy_level), | |
2275 | ARRAY_SIZE(vrate_adj_pct) - 1); | |
2276 | u32 adj_pct = vrate_adj_pct[idx]; | |
2277 | ||
2278 | if (ioc->busy_level > 0) | |
2279 | adj_pct = 100 - adj_pct; | |
2280 | else | |
2281 | adj_pct = 100 + adj_pct; | |
2282 | ||
2283 | vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100), | |
2284 | vrate_min, vrate_max); | |
2285 | } | |
2286 | ||
d6c8e949 | 2287 | trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, |
065655c8 | 2288 | nr_lagging, nr_shortages); |
7caa4715 | 2289 | |
ac33e91e | 2290 | ioc->vtime_base_rate = vrate; |
7ca5b2e6 | 2291 | ioc_refresh_margins(ioc); |
25d41e4a TH |
2292 | } else if (ioc->busy_level != prev_busy_level || nr_lagging) { |
2293 | trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate), | |
d6c8e949 | 2294 | missed_ppm, rq_wait_pct, nr_lagging, |
065655c8 | 2295 | nr_shortages); |
7caa4715 TH |
2296 | } |
2297 | ||
2298 | ioc_refresh_params(ioc, false); | |
2299 | ||
2300 | /* | |
2301 | * This period is done. Move onto the next one. If nothing's | |
2302 | * going on with the device, stop the timer. | |
2303 | */ | |
2304 | atomic64_inc(&ioc->cur_period); | |
2305 | ||
2306 | if (ioc->running != IOC_STOP) { | |
2307 | if (!list_empty(&ioc->active_iocgs)) { | |
2308 | ioc_start_period(ioc, &now); | |
2309 | } else { | |
2310 | ioc->busy_level = 0; | |
ac33e91e | 2311 | ioc->vtime_err = 0; |
7caa4715 TH |
2312 | ioc->running = IOC_IDLE; |
2313 | } | |
ac33e91e TH |
2314 | |
2315 | ioc_refresh_vrate(ioc, &now); | |
7caa4715 TH |
2316 | } |
2317 | ||
2318 | spin_unlock_irq(&ioc->lock); | |
2319 | } | |
2320 | ||
b0853ab4 TH |
2321 | static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, |
2322 | u64 abs_cost, struct ioc_now *now) | |
2323 | { | |
2324 | struct ioc *ioc = iocg->ioc; | |
2325 | struct ioc_margins *margins = &ioc->margins; | |
2326 | u32 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100); | |
04603755 | 2327 | u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi; |
b0853ab4 TH |
2328 | u32 hwi; |
2329 | s64 margin; | |
2330 | u64 cost, new_inuse; | |
2331 | ||
2332 | current_hweight(iocg, NULL, &hwi); | |
04603755 | 2333 | old_hwi = hwi; |
b0853ab4 TH |
2334 | cost = abs_cost_to_cost(abs_cost, hwi); |
2335 | margin = now->vnow - vtime - cost; | |
2336 | ||
c421a3eb TH |
2337 | /* debt handling owns inuse for debtors */ |
2338 | if (iocg->abs_vdebt) | |
2339 | return cost; | |
2340 | ||
b0853ab4 TH |
2341 | /* |
2342 | * We only increase inuse during period and do so iff the margin has | |
2343 | * deteriorated since the previous adjustment. | |
2344 | */ | |
2345 | if (margin >= iocg->saved_margin || margin >= margins->low || | |
2346 | iocg->inuse == iocg->active) | |
2347 | return cost; | |
2348 | ||
2349 | spin_lock_irq(&ioc->lock); | |
2350 | ||
2351 | /* we own inuse only when @iocg is in the normal active state */ | |
c421a3eb | 2352 | if (iocg->abs_vdebt || list_empty(&iocg->active_list)) { |
b0853ab4 TH |
2353 | spin_unlock_irq(&ioc->lock); |
2354 | return cost; | |
2355 | } | |
2356 | ||
2357 | /* bump up inuse till @abs_cost fits in the existing budget */ | |
2358 | new_inuse = iocg->inuse; | |
2359 | do { | |
2360 | new_inuse = new_inuse + adj_step; | |
2361 | propagate_weights(iocg, iocg->active, new_inuse, true, now); | |
2362 | current_hweight(iocg, NULL, &hwi); | |
2363 | cost = abs_cost_to_cost(abs_cost, hwi); | |
2364 | } while (time_after64(vtime + cost, now->vnow) && | |
2365 | iocg->inuse != iocg->active); | |
2366 | ||
2367 | spin_unlock_irq(&ioc->lock); | |
04603755 TH |
2368 | |
2369 | TRACE_IOCG_PATH(inuse_adjust, iocg, now, | |
2370 | old_inuse, iocg->inuse, old_hwi, hwi); | |
2371 | ||
b0853ab4 TH |
2372 | return cost; |
2373 | } | |
2374 | ||
7caa4715 TH |
2375 | static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg, |
2376 | bool is_merge, u64 *costp) | |
2377 | { | |
2378 | struct ioc *ioc = iocg->ioc; | |
2379 | u64 coef_seqio, coef_randio, coef_page; | |
2380 | u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1); | |
2381 | u64 seek_pages = 0; | |
2382 | u64 cost = 0; | |
2383 | ||
2384 | switch (bio_op(bio)) { | |
2385 | case REQ_OP_READ: | |
2386 | coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO]; | |
2387 | coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO]; | |
2388 | coef_page = ioc->params.lcoefs[LCOEF_RPAGE]; | |
2389 | break; | |
2390 | case REQ_OP_WRITE: | |
2391 | coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO]; | |
2392 | coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO]; | |
2393 | coef_page = ioc->params.lcoefs[LCOEF_WPAGE]; | |
2394 | break; | |
2395 | default: | |
2396 | goto out; | |
2397 | } | |
2398 | ||
2399 | if (iocg->cursor) { | |
2400 | seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor); | |
2401 | seek_pages >>= IOC_SECT_TO_PAGE_SHIFT; | |
2402 | } | |
2403 | ||
2404 | if (!is_merge) { | |
2405 | if (seek_pages > LCOEF_RANDIO_PAGES) { | |
2406 | cost += coef_randio; | |
2407 | } else { | |
2408 | cost += coef_seqio; | |
2409 | } | |
2410 | } | |
2411 | cost += pages * coef_page; | |
2412 | out: | |
2413 | *costp = cost; | |
2414 | } | |
2415 | ||
2416 | static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge) | |
2417 | { | |
2418 | u64 cost; | |
2419 | ||
2420 | calc_vtime_cost_builtin(bio, iocg, is_merge, &cost); | |
2421 | return cost; | |
2422 | } | |
2423 | ||
cd006509 TH |
2424 | static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc, |
2425 | u64 *costp) | |
2426 | { | |
2427 | unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT; | |
2428 | ||
2429 | switch (req_op(rq)) { | |
2430 | case REQ_OP_READ: | |
2431 | *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE]; | |
2432 | break; | |
2433 | case REQ_OP_WRITE: | |
2434 | *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE]; | |
2435 | break; | |
2436 | default: | |
2437 | *costp = 0; | |
2438 | } | |
2439 | } | |
2440 | ||
2441 | static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc) | |
2442 | { | |
2443 | u64 cost; | |
2444 | ||
2445 | calc_size_vtime_cost_builtin(rq, ioc, &cost); | |
2446 | return cost; | |
2447 | } | |
2448 | ||
7caa4715 TH |
2449 | static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio) |
2450 | { | |
2451 | struct blkcg_gq *blkg = bio->bi_blkg; | |
2452 | struct ioc *ioc = rqos_to_ioc(rqos); | |
2453 | struct ioc_gq *iocg = blkg_to_iocg(blkg); | |
2454 | struct ioc_now now; | |
2455 | struct iocg_wait wait; | |
7caa4715 | 2456 | u64 abs_cost, cost, vtime; |
da437b95 TH |
2457 | bool use_debt, ioc_locked; |
2458 | unsigned long flags; | |
7caa4715 TH |
2459 | |
2460 | /* bypass IOs if disabled or for root cgroup */ | |
2461 | if (!ioc->enabled || !iocg->level) | |
2462 | return; | |
2463 | ||
7caa4715 TH |
2464 | /* calculate the absolute vtime cost */ |
2465 | abs_cost = calc_vtime_cost(bio, iocg, false); | |
2466 | if (!abs_cost) | |
2467 | return; | |
2468 | ||
f1de2439 TH |
2469 | if (!iocg_activate(iocg, &now)) |
2470 | return; | |
2471 | ||
7caa4715 | 2472 | iocg->cursor = bio_end_sector(bio); |
7caa4715 | 2473 | vtime = atomic64_read(&iocg->vtime); |
b0853ab4 | 2474 | cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now); |
7caa4715 TH |
2475 | |
2476 | /* | |
2477 | * If no one's waiting and within budget, issue right away. The | |
2478 | * tests are racy but the races aren't systemic - we only miss once | |
2479 | * in a while which is fine. | |
2480 | */ | |
0b80f986 | 2481 | if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && |
7caa4715 | 2482 | time_before_eq64(vtime + cost, now.vnow)) { |
97eb1975 | 2483 | iocg_commit_bio(iocg, bio, abs_cost, cost); |
7caa4715 TH |
2484 | return; |
2485 | } | |
2486 | ||
36a52481 | 2487 | /* |
da437b95 TH |
2488 | * We're over budget. This can be handled in two ways. IOs which may |
2489 | * cause priority inversions are punted to @ioc->aux_iocg and charged as | |
2490 | * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling | |
2491 | * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine | |
2492 | * whether debt handling is needed and acquire locks accordingly. | |
0b80f986 | 2493 | */ |
da437b95 TH |
2494 | use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current); |
2495 | ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt); | |
b0853ab4 | 2496 | retry_lock: |
da437b95 TH |
2497 | iocg_lock(iocg, ioc_locked, &flags); |
2498 | ||
2499 | /* | |
2500 | * @iocg must stay activated for debt and waitq handling. Deactivation | |
2501 | * is synchronized against both ioc->lock and waitq.lock and we won't | |
2502 | * get deactivated as long as we're waiting or has debt, so we're good | |
2503 | * if we're activated here. In the unlikely cases that we aren't, just | |
2504 | * issue the IO. | |
2505 | */ | |
0b80f986 | 2506 | if (unlikely(list_empty(&iocg->active_list))) { |
da437b95 | 2507 | iocg_unlock(iocg, ioc_locked, &flags); |
97eb1975 | 2508 | iocg_commit_bio(iocg, bio, abs_cost, cost); |
0b80f986 TH |
2509 | return; |
2510 | } | |
2511 | ||
2512 | /* | |
2513 | * We're over budget. If @bio has to be issued regardless, remember | |
2514 | * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay | |
2515 | * off the debt before waking more IOs. | |
2516 | * | |
36a52481 | 2517 | * This way, the debt is continuously paid off each period with the |
0b80f986 TH |
2518 | * actual budget available to the cgroup. If we just wound vtime, we |
2519 | * would incorrectly use the current hw_inuse for the entire amount | |
2520 | * which, for example, can lead to the cgroup staying blocked for a | |
2521 | * long time even with substantially raised hw_inuse. | |
2522 | * | |
2523 | * An iocg with vdebt should stay online so that the timer can keep | |
2524 | * deducting its vdebt and [de]activate use_delay mechanism | |
2525 | * accordingly. We don't want to race against the timer trying to | |
2526 | * clear them and leave @iocg inactive w/ dangling use_delay heavily | |
2527 | * penalizing the cgroup and its descendants. | |
36a52481 | 2528 | */ |
da437b95 | 2529 | if (use_debt) { |
c421a3eb | 2530 | iocg_incur_debt(iocg, abs_cost, &now); |
54c52e10 | 2531 | if (iocg_kick_delay(iocg, &now)) |
d7bd15a1 TH |
2532 | blkcg_schedule_throttle(rqos->q, |
2533 | (bio->bi_opf & REQ_SWAP) == REQ_SWAP); | |
da437b95 | 2534 | iocg_unlock(iocg, ioc_locked, &flags); |
7caa4715 TH |
2535 | return; |
2536 | } | |
2537 | ||
b0853ab4 | 2538 | /* guarantee that iocgs w/ waiters have maximum inuse */ |
c421a3eb | 2539 | if (!iocg->abs_vdebt && iocg->inuse != iocg->active) { |
b0853ab4 TH |
2540 | if (!ioc_locked) { |
2541 | iocg_unlock(iocg, false, &flags); | |
2542 | ioc_locked = true; | |
2543 | goto retry_lock; | |
2544 | } | |
2545 | propagate_weights(iocg, iocg->active, iocg->active, true, | |
2546 | &now); | |
2547 | } | |
2548 | ||
7caa4715 TH |
2549 | /* |
2550 | * Append self to the waitq and schedule the wakeup timer if we're | |
2551 | * the first waiter. The timer duration is calculated based on the | |
2552 | * current vrate. vtime and hweight changes can make it too short | |
2553 | * or too long. Each wait entry records the absolute cost it's | |
2554 | * waiting for to allow re-evaluation using a custom wait entry. | |
2555 | * | |
2556 | * If too short, the timer simply reschedules itself. If too long, | |
2557 | * the period timer will notice and trigger wakeups. | |
2558 | * | |
2559 | * All waiters are on iocg->waitq and the wait states are | |
2560 | * synchronized using waitq.lock. | |
2561 | */ | |
7caa4715 TH |
2562 | init_waitqueue_func_entry(&wait.wait, iocg_wake_fn); |
2563 | wait.wait.private = current; | |
2564 | wait.bio = bio; | |
2565 | wait.abs_cost = abs_cost; | |
2566 | wait.committed = false; /* will be set true by waker */ | |
2567 | ||
2568 | __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait); | |
da437b95 | 2569 | iocg_kick_waitq(iocg, ioc_locked, &now); |
7caa4715 | 2570 | |
da437b95 | 2571 | iocg_unlock(iocg, ioc_locked, &flags); |
7caa4715 TH |
2572 | |
2573 | while (true) { | |
2574 | set_current_state(TASK_UNINTERRUPTIBLE); | |
2575 | if (wait.committed) | |
2576 | break; | |
2577 | io_schedule(); | |
2578 | } | |
2579 | ||
2580 | /* waker already committed us, proceed */ | |
2581 | finish_wait(&iocg->waitq, &wait.wait); | |
2582 | } | |
2583 | ||
2584 | static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq, | |
2585 | struct bio *bio) | |
2586 | { | |
2587 | struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg); | |
e1518f63 | 2588 | struct ioc *ioc = iocg->ioc; |
7caa4715 | 2589 | sector_t bio_end = bio_end_sector(bio); |
e1518f63 | 2590 | struct ioc_now now; |
b0853ab4 | 2591 | u64 vtime, abs_cost, cost; |
0b80f986 | 2592 | unsigned long flags; |
7caa4715 | 2593 | |
e1518f63 TH |
2594 | /* bypass if disabled or for root cgroup */ |
2595 | if (!ioc->enabled || !iocg->level) | |
7caa4715 TH |
2596 | return; |
2597 | ||
2598 | abs_cost = calc_vtime_cost(bio, iocg, true); | |
2599 | if (!abs_cost) | |
2600 | return; | |
2601 | ||
e1518f63 | 2602 | ioc_now(ioc, &now); |
b0853ab4 TH |
2603 | |
2604 | vtime = atomic64_read(&iocg->vtime); | |
2605 | cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now); | |
e1518f63 | 2606 | |
7caa4715 TH |
2607 | /* update cursor if backmerging into the request at the cursor */ |
2608 | if (blk_rq_pos(rq) < bio_end && | |
2609 | blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor) | |
2610 | iocg->cursor = bio_end; | |
2611 | ||
e1518f63 | 2612 | /* |
0b80f986 TH |
2613 | * Charge if there's enough vtime budget and the existing request has |
2614 | * cost assigned. | |
e1518f63 TH |
2615 | */ |
2616 | if (rq->bio && rq->bio->bi_iocost_cost && | |
0b80f986 | 2617 | time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) { |
97eb1975 | 2618 | iocg_commit_bio(iocg, bio, abs_cost, cost); |
0b80f986 TH |
2619 | return; |
2620 | } | |
2621 | ||
2622 | /* | |
2623 | * Otherwise, account it as debt if @iocg is online, which it should | |
2624 | * be for the vast majority of cases. See debt handling in | |
2625 | * ioc_rqos_throttle() for details. | |
2626 | */ | |
c421a3eb TH |
2627 | spin_lock_irqsave(&ioc->lock, flags); |
2628 | spin_lock(&iocg->waitq.lock); | |
2629 | ||
0b80f986 | 2630 | if (likely(!list_empty(&iocg->active_list))) { |
c421a3eb TH |
2631 | iocg_incur_debt(iocg, abs_cost, &now); |
2632 | if (iocg_kick_delay(iocg, &now)) | |
2633 | blkcg_schedule_throttle(rqos->q, | |
2634 | (bio->bi_opf & REQ_SWAP) == REQ_SWAP); | |
0b80f986 | 2635 | } else { |
97eb1975 | 2636 | iocg_commit_bio(iocg, bio, abs_cost, cost); |
0b80f986 | 2637 | } |
c421a3eb TH |
2638 | |
2639 | spin_unlock(&iocg->waitq.lock); | |
2640 | spin_unlock_irqrestore(&ioc->lock, flags); | |
7caa4715 TH |
2641 | } |
2642 | ||
2643 | static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio) | |
2644 | { | |
2645 | struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg); | |
2646 | ||
2647 | if (iocg && bio->bi_iocost_cost) | |
2648 | atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime); | |
2649 | } | |
2650 | ||
2651 | static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq) | |
2652 | { | |
2653 | struct ioc *ioc = rqos_to_ioc(rqos); | |
5e124f74 | 2654 | struct ioc_pcpu_stat *ccs; |
cd006509 | 2655 | u64 on_q_ns, rq_wait_ns, size_nsec; |
7caa4715 TH |
2656 | int pidx, rw; |
2657 | ||
2658 | if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns) | |
2659 | return; | |
2660 | ||
2661 | switch (req_op(rq) & REQ_OP_MASK) { | |
2662 | case REQ_OP_READ: | |
2663 | pidx = QOS_RLAT; | |
2664 | rw = READ; | |
2665 | break; | |
2666 | case REQ_OP_WRITE: | |
2667 | pidx = QOS_WLAT; | |
2668 | rw = WRITE; | |
2669 | break; | |
2670 | default: | |
2671 | return; | |
2672 | } | |
2673 | ||
2674 | on_q_ns = ktime_get_ns() - rq->alloc_time_ns; | |
2675 | rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns; | |
cd006509 | 2676 | size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC); |
7caa4715 | 2677 | |
5e124f74 TH |
2678 | ccs = get_cpu_ptr(ioc->pcpu_stat); |
2679 | ||
cd006509 TH |
2680 | if (on_q_ns <= size_nsec || |
2681 | on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC) | |
5e124f74 | 2682 | local_inc(&ccs->missed[rw].nr_met); |
7caa4715 | 2683 | else |
5e124f74 TH |
2684 | local_inc(&ccs->missed[rw].nr_missed); |
2685 | ||
2686 | local64_add(rq_wait_ns, &ccs->rq_wait_ns); | |
7caa4715 | 2687 | |
5e124f74 | 2688 | put_cpu_ptr(ccs); |
7caa4715 TH |
2689 | } |
2690 | ||
2691 | static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos) | |
2692 | { | |
2693 | struct ioc *ioc = rqos_to_ioc(rqos); | |
2694 | ||
2695 | spin_lock_irq(&ioc->lock); | |
2696 | ioc_refresh_params(ioc, false); | |
2697 | spin_unlock_irq(&ioc->lock); | |
2698 | } | |
2699 | ||
2700 | static void ioc_rqos_exit(struct rq_qos *rqos) | |
2701 | { | |
2702 | struct ioc *ioc = rqos_to_ioc(rqos); | |
2703 | ||
2704 | blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost); | |
2705 | ||
2706 | spin_lock_irq(&ioc->lock); | |
2707 | ioc->running = IOC_STOP; | |
2708 | spin_unlock_irq(&ioc->lock); | |
2709 | ||
2710 | del_timer_sync(&ioc->timer); | |
2711 | free_percpu(ioc->pcpu_stat); | |
2712 | kfree(ioc); | |
2713 | } | |
2714 | ||
2715 | static struct rq_qos_ops ioc_rqos_ops = { | |
2716 | .throttle = ioc_rqos_throttle, | |
2717 | .merge = ioc_rqos_merge, | |
2718 | .done_bio = ioc_rqos_done_bio, | |
2719 | .done = ioc_rqos_done, | |
2720 | .queue_depth_changed = ioc_rqos_queue_depth_changed, | |
2721 | .exit = ioc_rqos_exit, | |
2722 | }; | |
2723 | ||
2724 | static int blk_iocost_init(struct request_queue *q) | |
2725 | { | |
2726 | struct ioc *ioc; | |
2727 | struct rq_qos *rqos; | |
5e124f74 | 2728 | int i, cpu, ret; |
7caa4715 TH |
2729 | |
2730 | ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); | |
2731 | if (!ioc) | |
2732 | return -ENOMEM; | |
2733 | ||
2734 | ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat); | |
2735 | if (!ioc->pcpu_stat) { | |
2736 | kfree(ioc); | |
2737 | return -ENOMEM; | |
2738 | } | |
2739 | ||
5e124f74 TH |
2740 | for_each_possible_cpu(cpu) { |
2741 | struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu); | |
2742 | ||
2743 | for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) { | |
2744 | local_set(&ccs->missed[i].nr_met, 0); | |
2745 | local_set(&ccs->missed[i].nr_missed, 0); | |
2746 | } | |
2747 | local64_set(&ccs->rq_wait_ns, 0); | |
2748 | } | |
2749 | ||
7caa4715 TH |
2750 | rqos = &ioc->rqos; |
2751 | rqos->id = RQ_QOS_COST; | |
2752 | rqos->ops = &ioc_rqos_ops; | |
2753 | rqos->q = q; | |
2754 | ||
2755 | spin_lock_init(&ioc->lock); | |
2756 | timer_setup(&ioc->timer, ioc_timer_fn, 0); | |
2757 | INIT_LIST_HEAD(&ioc->active_iocgs); | |
2758 | ||
2759 | ioc->running = IOC_IDLE; | |
ac33e91e | 2760 | ioc->vtime_base_rate = VTIME_PER_USEC; |
7caa4715 | 2761 | atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); |
67b7b641 | 2762 | seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock); |
7caa4715 TH |
2763 | ioc->period_at = ktime_to_us(ktime_get()); |
2764 | atomic64_set(&ioc->cur_period, 0); | |
2765 | atomic_set(&ioc->hweight_gen, 0); | |
2766 | ||
2767 | spin_lock_irq(&ioc->lock); | |
2768 | ioc->autop_idx = AUTOP_INVALID; | |
2769 | ioc_refresh_params(ioc, true); | |
2770 | spin_unlock_irq(&ioc->lock); | |
2771 | ||
2772 | rq_qos_add(q, rqos); | |
2773 | ret = blkcg_activate_policy(q, &blkcg_policy_iocost); | |
2774 | if (ret) { | |
2775 | rq_qos_del(q, rqos); | |
3532e722 | 2776 | free_percpu(ioc->pcpu_stat); |
7caa4715 TH |
2777 | kfree(ioc); |
2778 | return ret; | |
2779 | } | |
2780 | return 0; | |
2781 | } | |
2782 | ||
2783 | static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp) | |
2784 | { | |
2785 | struct ioc_cgrp *iocc; | |
2786 | ||
2787 | iocc = kzalloc(sizeof(struct ioc_cgrp), gfp); | |
e916ad29 TH |
2788 | if (!iocc) |
2789 | return NULL; | |
7caa4715 | 2790 | |
bd0adb91 | 2791 | iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE; |
7caa4715 TH |
2792 | return &iocc->cpd; |
2793 | } | |
2794 | ||
2795 | static void ioc_cpd_free(struct blkcg_policy_data *cpd) | |
2796 | { | |
2797 | kfree(container_of(cpd, struct ioc_cgrp, cpd)); | |
2798 | } | |
2799 | ||
2800 | static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q, | |
2801 | struct blkcg *blkcg) | |
2802 | { | |
2803 | int levels = blkcg->css.cgroup->level + 1; | |
2804 | struct ioc_gq *iocg; | |
2805 | ||
f61d6e25 | 2806 | iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node); |
7caa4715 TH |
2807 | if (!iocg) |
2808 | return NULL; | |
2809 | ||
97eb1975 TH |
2810 | iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp); |
2811 | if (!iocg->pcpu_stat) { | |
2812 | kfree(iocg); | |
2813 | return NULL; | |
2814 | } | |
2815 | ||
7caa4715 TH |
2816 | return &iocg->pd; |
2817 | } | |
2818 | ||
2819 | static void ioc_pd_init(struct blkg_policy_data *pd) | |
2820 | { | |
2821 | struct ioc_gq *iocg = pd_to_iocg(pd); | |
2822 | struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd); | |
2823 | struct ioc *ioc = q_to_ioc(blkg->q); | |
2824 | struct ioc_now now; | |
2825 | struct blkcg_gq *tblkg; | |
2826 | unsigned long flags; | |
2827 | ||
2828 | ioc_now(ioc, &now); | |
2829 | ||
2830 | iocg->ioc = ioc; | |
2831 | atomic64_set(&iocg->vtime, now.vnow); | |
2832 | atomic64_set(&iocg->done_vtime, now.vnow); | |
2833 | atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); | |
2834 | INIT_LIST_HEAD(&iocg->active_list); | |
97eb1975 | 2835 | INIT_LIST_HEAD(&iocg->walk_list); |
8692d2db | 2836 | INIT_LIST_HEAD(&iocg->surplus_list); |
fe20cdb5 TH |
2837 | iocg->hweight_active = WEIGHT_ONE; |
2838 | iocg->hweight_inuse = WEIGHT_ONE; | |
7caa4715 TH |
2839 | |
2840 | init_waitqueue_head(&iocg->waitq); | |
2841 | hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
2842 | iocg->waitq_timer.function = iocg_waitq_timer_fn; | |
7caa4715 TH |
2843 | |
2844 | iocg->level = blkg->blkcg->css.cgroup->level; | |
2845 | ||
2846 | for (tblkg = blkg; tblkg; tblkg = tblkg->parent) { | |
2847 | struct ioc_gq *tiocg = blkg_to_iocg(tblkg); | |
2848 | iocg->ancestors[tiocg->level] = tiocg; | |
2849 | } | |
2850 | ||
2851 | spin_lock_irqsave(&ioc->lock, flags); | |
b0853ab4 | 2852 | weight_updated(iocg, &now); |
7caa4715 TH |
2853 | spin_unlock_irqrestore(&ioc->lock, flags); |
2854 | } | |
2855 | ||
2856 | static void ioc_pd_free(struct blkg_policy_data *pd) | |
2857 | { | |
2858 | struct ioc_gq *iocg = pd_to_iocg(pd); | |
2859 | struct ioc *ioc = iocg->ioc; | |
5aeac7c4 | 2860 | unsigned long flags; |
7caa4715 TH |
2861 | |
2862 | if (ioc) { | |
5aeac7c4 | 2863 | spin_lock_irqsave(&ioc->lock, flags); |
97eb1975 | 2864 | |
7caa4715 | 2865 | if (!list_empty(&iocg->active_list)) { |
b0853ab4 TH |
2866 | struct ioc_now now; |
2867 | ||
2868 | ioc_now(ioc, &now); | |
2869 | propagate_weights(iocg, 0, 0, false, &now); | |
7caa4715 TH |
2870 | list_del_init(&iocg->active_list); |
2871 | } | |
97eb1975 TH |
2872 | |
2873 | WARN_ON_ONCE(!list_empty(&iocg->walk_list)); | |
8692d2db | 2874 | WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); |
97eb1975 | 2875 | |
5aeac7c4 | 2876 | spin_unlock_irqrestore(&ioc->lock, flags); |
e036c4ca TH |
2877 | |
2878 | hrtimer_cancel(&iocg->waitq_timer); | |
7caa4715 | 2879 | } |
97eb1975 | 2880 | free_percpu(iocg->pcpu_stat); |
7caa4715 TH |
2881 | kfree(iocg); |
2882 | } | |
2883 | ||
97eb1975 TH |
2884 | static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size) |
2885 | { | |
2886 | struct ioc_gq *iocg = pd_to_iocg(pd); | |
2887 | struct ioc *ioc = iocg->ioc; | |
2888 | size_t pos = 0; | |
2889 | ||
2890 | if (!ioc->enabled) | |
2891 | return 0; | |
2892 | ||
2893 | if (iocg->level == 0) { | |
2894 | unsigned vp10k = DIV64_U64_ROUND_CLOSEST( | |
ac33e91e | 2895 | ioc->vtime_base_rate * 10000, |
97eb1975 TH |
2896 | VTIME_PER_USEC); |
2897 | pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u", | |
2898 | vp10k / 100, vp10k % 100); | |
2899 | } | |
2900 | ||
2901 | pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu", | |
2902 | iocg->last_stat.usage_us); | |
2903 | ||
f0bf84a5 TH |
2904 | if (blkcg_debug_stats) |
2905 | pos += scnprintf(buf + pos, size - pos, | |
2906 | " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu", | |
2907 | iocg->last_stat.wait_us, | |
2908 | iocg->last_stat.indebt_us, | |
2909 | iocg->last_stat.indelay_us); | |
2910 | ||
97eb1975 TH |
2911 | return pos; |
2912 | } | |
2913 | ||
7caa4715 TH |
2914 | static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd, |
2915 | int off) | |
2916 | { | |
2917 | const char *dname = blkg_dev_name(pd->blkg); | |
2918 | struct ioc_gq *iocg = pd_to_iocg(pd); | |
2919 | ||
2920 | if (dname && iocg->cfg_weight) | |
bd0adb91 | 2921 | seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE); |
7caa4715 TH |
2922 | return 0; |
2923 | } | |
2924 | ||
2925 | ||
2926 | static int ioc_weight_show(struct seq_file *sf, void *v) | |
2927 | { | |
2928 | struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); | |
2929 | struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg); | |
2930 | ||
bd0adb91 | 2931 | seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE); |
7caa4715 TH |
2932 | blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill, |
2933 | &blkcg_policy_iocost, seq_cft(sf)->private, false); | |
2934 | return 0; | |
2935 | } | |
2936 | ||
2937 | static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf, | |
2938 | size_t nbytes, loff_t off) | |
2939 | { | |
2940 | struct blkcg *blkcg = css_to_blkcg(of_css(of)); | |
2941 | struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg); | |
2942 | struct blkg_conf_ctx ctx; | |
b0853ab4 | 2943 | struct ioc_now now; |
7caa4715 TH |
2944 | struct ioc_gq *iocg; |
2945 | u32 v; | |
2946 | int ret; | |
2947 | ||
2948 | if (!strchr(buf, ':')) { | |
2949 | struct blkcg_gq *blkg; | |
2950 | ||
2951 | if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v)) | |
2952 | return -EINVAL; | |
2953 | ||
2954 | if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX) | |
2955 | return -EINVAL; | |
2956 | ||
2957 | spin_lock(&blkcg->lock); | |
bd0adb91 | 2958 | iocc->dfl_weight = v * WEIGHT_ONE; |
7caa4715 TH |
2959 | hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { |
2960 | struct ioc_gq *iocg = blkg_to_iocg(blkg); | |
2961 | ||
2962 | if (iocg) { | |
2963 | spin_lock_irq(&iocg->ioc->lock); | |
b0853ab4 TH |
2964 | ioc_now(iocg->ioc, &now); |
2965 | weight_updated(iocg, &now); | |
7caa4715 TH |
2966 | spin_unlock_irq(&iocg->ioc->lock); |
2967 | } | |
2968 | } | |
2969 | spin_unlock(&blkcg->lock); | |
2970 | ||
2971 | return nbytes; | |
2972 | } | |
2973 | ||
2974 | ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx); | |
2975 | if (ret) | |
2976 | return ret; | |
2977 | ||
2978 | iocg = blkg_to_iocg(ctx.blkg); | |
2979 | ||
2980 | if (!strncmp(ctx.body, "default", 7)) { | |
2981 | v = 0; | |
2982 | } else { | |
2983 | if (!sscanf(ctx.body, "%u", &v)) | |
2984 | goto einval; | |
2985 | if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX) | |
2986 | goto einval; | |
2987 | } | |
2988 | ||
41591a51 | 2989 | spin_lock(&iocg->ioc->lock); |
bd0adb91 | 2990 | iocg->cfg_weight = v * WEIGHT_ONE; |
b0853ab4 TH |
2991 | ioc_now(iocg->ioc, &now); |
2992 | weight_updated(iocg, &now); | |
41591a51 | 2993 | spin_unlock(&iocg->ioc->lock); |
7caa4715 TH |
2994 | |
2995 | blkg_conf_finish(&ctx); | |
2996 | return nbytes; | |
2997 | ||
2998 | einval: | |
2999 | blkg_conf_finish(&ctx); | |
3000 | return -EINVAL; | |
3001 | } | |
3002 | ||
3003 | static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd, | |
3004 | int off) | |
3005 | { | |
3006 | const char *dname = blkg_dev_name(pd->blkg); | |
3007 | struct ioc *ioc = pd_to_iocg(pd)->ioc; | |
3008 | ||
3009 | if (!dname) | |
3010 | return 0; | |
3011 | ||
3012 | seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n", | |
3013 | dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto", | |
3014 | ioc->params.qos[QOS_RPPM] / 10000, | |
3015 | ioc->params.qos[QOS_RPPM] % 10000 / 100, | |
3016 | ioc->params.qos[QOS_RLAT], | |
3017 | ioc->params.qos[QOS_WPPM] / 10000, | |
3018 | ioc->params.qos[QOS_WPPM] % 10000 / 100, | |
3019 | ioc->params.qos[QOS_WLAT], | |
3020 | ioc->params.qos[QOS_MIN] / 10000, | |
3021 | ioc->params.qos[QOS_MIN] % 10000 / 100, | |
3022 | ioc->params.qos[QOS_MAX] / 10000, | |
3023 | ioc->params.qos[QOS_MAX] % 10000 / 100); | |
3024 | return 0; | |
3025 | } | |
3026 | ||
3027 | static int ioc_qos_show(struct seq_file *sf, void *v) | |
3028 | { | |
3029 | struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); | |
3030 | ||
3031 | blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill, | |
3032 | &blkcg_policy_iocost, seq_cft(sf)->private, false); | |
3033 | return 0; | |
3034 | } | |
3035 | ||
3036 | static const match_table_t qos_ctrl_tokens = { | |
3037 | { QOS_ENABLE, "enable=%u" }, | |
3038 | { QOS_CTRL, "ctrl=%s" }, | |
3039 | { NR_QOS_CTRL_PARAMS, NULL }, | |
3040 | }; | |
3041 | ||
3042 | static const match_table_t qos_tokens = { | |
3043 | { QOS_RPPM, "rpct=%s" }, | |
3044 | { QOS_RLAT, "rlat=%u" }, | |
3045 | { QOS_WPPM, "wpct=%s" }, | |
3046 | { QOS_WLAT, "wlat=%u" }, | |
3047 | { QOS_MIN, "min=%s" }, | |
3048 | { QOS_MAX, "max=%s" }, | |
3049 | { NR_QOS_PARAMS, NULL }, | |
3050 | }; | |
3051 | ||
3052 | static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input, | |
3053 | size_t nbytes, loff_t off) | |
3054 | { | |
3055 | struct gendisk *disk; | |
3056 | struct ioc *ioc; | |
3057 | u32 qos[NR_QOS_PARAMS]; | |
3058 | bool enable, user; | |
3059 | char *p; | |
3060 | int ret; | |
3061 | ||
3062 | disk = blkcg_conf_get_disk(&input); | |
3063 | if (IS_ERR(disk)) | |
3064 | return PTR_ERR(disk); | |
3065 | ||
3066 | ioc = q_to_ioc(disk->queue); | |
3067 | if (!ioc) { | |
3068 | ret = blk_iocost_init(disk->queue); | |
3069 | if (ret) | |
3070 | goto err; | |
3071 | ioc = q_to_ioc(disk->queue); | |
3072 | } | |
3073 | ||
3074 | spin_lock_irq(&ioc->lock); | |
3075 | memcpy(qos, ioc->params.qos, sizeof(qos)); | |
3076 | enable = ioc->enabled; | |
3077 | user = ioc->user_qos_params; | |
3078 | spin_unlock_irq(&ioc->lock); | |
3079 | ||
3080 | while ((p = strsep(&input, " \t\n"))) { | |
3081 | substring_t args[MAX_OPT_ARGS]; | |
3082 | char buf[32]; | |
3083 | int tok; | |
3084 | s64 v; | |
3085 | ||
3086 | if (!*p) | |
3087 | continue; | |
3088 | ||
3089 | switch (match_token(p, qos_ctrl_tokens, args)) { | |
3090 | case QOS_ENABLE: | |
3091 | match_u64(&args[0], &v); | |
3092 | enable = v; | |
3093 | continue; | |
3094 | case QOS_CTRL: | |
3095 | match_strlcpy(buf, &args[0], sizeof(buf)); | |
3096 | if (!strcmp(buf, "auto")) | |
3097 | user = false; | |
3098 | else if (!strcmp(buf, "user")) | |
3099 | user = true; | |
3100 | else | |
3101 | goto einval; | |
3102 | continue; | |
3103 | } | |
3104 | ||
3105 | tok = match_token(p, qos_tokens, args); | |
3106 | switch (tok) { | |
3107 | case QOS_RPPM: | |
3108 | case QOS_WPPM: | |
3109 | if (match_strlcpy(buf, &args[0], sizeof(buf)) >= | |
3110 | sizeof(buf)) | |
3111 | goto einval; | |
3112 | if (cgroup_parse_float(buf, 2, &v)) | |
3113 | goto einval; | |
3114 | if (v < 0 || v > 10000) | |
3115 | goto einval; | |
3116 | qos[tok] = v * 100; | |
3117 | break; | |
3118 | case QOS_RLAT: | |
3119 | case QOS_WLAT: | |
3120 | if (match_u64(&args[0], &v)) | |
3121 | goto einval; | |
3122 | qos[tok] = v; | |
3123 | break; | |
3124 | case QOS_MIN: | |
3125 | case QOS_MAX: | |
3126 | if (match_strlcpy(buf, &args[0], sizeof(buf)) >= | |
3127 | sizeof(buf)) | |
3128 | goto einval; | |
3129 | if (cgroup_parse_float(buf, 2, &v)) | |
3130 | goto einval; | |
3131 | if (v < 0) | |
3132 | goto einval; | |
3133 | qos[tok] = clamp_t(s64, v * 100, | |
3134 | VRATE_MIN_PPM, VRATE_MAX_PPM); | |
3135 | break; | |
3136 | default: | |
3137 | goto einval; | |
3138 | } | |
3139 | user = true; | |
3140 | } | |
3141 | ||
3142 | if (qos[QOS_MIN] > qos[QOS_MAX]) | |
3143 | goto einval; | |
3144 | ||
3145 | spin_lock_irq(&ioc->lock); | |
3146 | ||
3147 | if (enable) { | |
cd006509 | 3148 | blk_stat_enable_accounting(ioc->rqos.q); |
7caa4715 TH |
3149 | blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q); |
3150 | ioc->enabled = true; | |
3151 | } else { | |
3152 | blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q); | |
3153 | ioc->enabled = false; | |
3154 | } | |
3155 | ||
3156 | if (user) { | |
3157 | memcpy(ioc->params.qos, qos, sizeof(qos)); | |
3158 | ioc->user_qos_params = true; | |
3159 | } else { | |
3160 | ioc->user_qos_params = false; | |
3161 | } | |
3162 | ||
3163 | ioc_refresh_params(ioc, true); | |
3164 | spin_unlock_irq(&ioc->lock); | |
3165 | ||
3166 | put_disk_and_module(disk); | |
3167 | return nbytes; | |
3168 | einval: | |
3169 | ret = -EINVAL; | |
3170 | err: | |
3171 | put_disk_and_module(disk); | |
3172 | return ret; | |
3173 | } | |
3174 | ||
3175 | static u64 ioc_cost_model_prfill(struct seq_file *sf, | |
3176 | struct blkg_policy_data *pd, int off) | |
3177 | { | |
3178 | const char *dname = blkg_dev_name(pd->blkg); | |
3179 | struct ioc *ioc = pd_to_iocg(pd)->ioc; | |
3180 | u64 *u = ioc->params.i_lcoefs; | |
3181 | ||
3182 | if (!dname) | |
3183 | return 0; | |
3184 | ||
3185 | seq_printf(sf, "%s ctrl=%s model=linear " | |
3186 | "rbps=%llu rseqiops=%llu rrandiops=%llu " | |
3187 | "wbps=%llu wseqiops=%llu wrandiops=%llu\n", | |
3188 | dname, ioc->user_cost_model ? "user" : "auto", | |
3189 | u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS], | |
3190 | u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]); | |
3191 | return 0; | |
3192 | } | |
3193 | ||
3194 | static int ioc_cost_model_show(struct seq_file *sf, void *v) | |
3195 | { | |
3196 | struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); | |
3197 | ||
3198 | blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill, | |
3199 | &blkcg_policy_iocost, seq_cft(sf)->private, false); | |
3200 | return 0; | |
3201 | } | |
3202 | ||
3203 | static const match_table_t cost_ctrl_tokens = { | |
3204 | { COST_CTRL, "ctrl=%s" }, | |
3205 | { COST_MODEL, "model=%s" }, | |
3206 | { NR_COST_CTRL_PARAMS, NULL }, | |
3207 | }; | |
3208 | ||
3209 | static const match_table_t i_lcoef_tokens = { | |
3210 | { I_LCOEF_RBPS, "rbps=%u" }, | |
3211 | { I_LCOEF_RSEQIOPS, "rseqiops=%u" }, | |
3212 | { I_LCOEF_RRANDIOPS, "rrandiops=%u" }, | |
3213 | { I_LCOEF_WBPS, "wbps=%u" }, | |
3214 | { I_LCOEF_WSEQIOPS, "wseqiops=%u" }, | |
3215 | { I_LCOEF_WRANDIOPS, "wrandiops=%u" }, | |
3216 | { NR_I_LCOEFS, NULL }, | |
3217 | }; | |
3218 | ||
3219 | static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, | |
3220 | size_t nbytes, loff_t off) | |
3221 | { | |
3222 | struct gendisk *disk; | |
3223 | struct ioc *ioc; | |
3224 | u64 u[NR_I_LCOEFS]; | |
3225 | bool user; | |
3226 | char *p; | |
3227 | int ret; | |
3228 | ||
3229 | disk = blkcg_conf_get_disk(&input); | |
3230 | if (IS_ERR(disk)) | |
3231 | return PTR_ERR(disk); | |
3232 | ||
3233 | ioc = q_to_ioc(disk->queue); | |
3234 | if (!ioc) { | |
3235 | ret = blk_iocost_init(disk->queue); | |
3236 | if (ret) | |
3237 | goto err; | |
3238 | ioc = q_to_ioc(disk->queue); | |
3239 | } | |
3240 | ||
3241 | spin_lock_irq(&ioc->lock); | |
3242 | memcpy(u, ioc->params.i_lcoefs, sizeof(u)); | |
3243 | user = ioc->user_cost_model; | |
3244 | spin_unlock_irq(&ioc->lock); | |
3245 | ||
3246 | while ((p = strsep(&input, " \t\n"))) { | |
3247 | substring_t args[MAX_OPT_ARGS]; | |
3248 | char buf[32]; | |
3249 | int tok; | |
3250 | u64 v; | |
3251 | ||
3252 | if (!*p) | |
3253 | continue; | |
3254 | ||
3255 | switch (match_token(p, cost_ctrl_tokens, args)) { | |
3256 | case COST_CTRL: | |
3257 | match_strlcpy(buf, &args[0], sizeof(buf)); | |
3258 | if (!strcmp(buf, "auto")) | |
3259 | user = false; | |
3260 | else if (!strcmp(buf, "user")) | |
3261 | user = true; | |
3262 | else | |
3263 | goto einval; | |
3264 | continue; | |
3265 | case COST_MODEL: | |
3266 | match_strlcpy(buf, &args[0], sizeof(buf)); | |
3267 | if (strcmp(buf, "linear")) | |
3268 | goto einval; | |
3269 | continue; | |
3270 | } | |
3271 | ||
3272 | tok = match_token(p, i_lcoef_tokens, args); | |
3273 | if (tok == NR_I_LCOEFS) | |
3274 | goto einval; | |
3275 | if (match_u64(&args[0], &v)) | |
3276 | goto einval; | |
3277 | u[tok] = v; | |
3278 | user = true; | |
3279 | } | |
3280 | ||
3281 | spin_lock_irq(&ioc->lock); | |
3282 | if (user) { | |
3283 | memcpy(ioc->params.i_lcoefs, u, sizeof(u)); | |
3284 | ioc->user_cost_model = true; | |
3285 | } else { | |
3286 | ioc->user_cost_model = false; | |
3287 | } | |
3288 | ioc_refresh_params(ioc, true); | |
3289 | spin_unlock_irq(&ioc->lock); | |
3290 | ||
3291 | put_disk_and_module(disk); | |
3292 | return nbytes; | |
3293 | ||
3294 | einval: | |
3295 | ret = -EINVAL; | |
3296 | err: | |
3297 | put_disk_and_module(disk); | |
3298 | return ret; | |
3299 | } | |
3300 | ||
3301 | static struct cftype ioc_files[] = { | |
3302 | { | |
3303 | .name = "weight", | |
3304 | .flags = CFTYPE_NOT_ON_ROOT, | |
3305 | .seq_show = ioc_weight_show, | |
3306 | .write = ioc_weight_write, | |
3307 | }, | |
3308 | { | |
3309 | .name = "cost.qos", | |
3310 | .flags = CFTYPE_ONLY_ON_ROOT, | |
3311 | .seq_show = ioc_qos_show, | |
3312 | .write = ioc_qos_write, | |
3313 | }, | |
3314 | { | |
3315 | .name = "cost.model", | |
3316 | .flags = CFTYPE_ONLY_ON_ROOT, | |
3317 | .seq_show = ioc_cost_model_show, | |
3318 | .write = ioc_cost_model_write, | |
3319 | }, | |
3320 | {} | |
3321 | }; | |
3322 | ||
3323 | static struct blkcg_policy blkcg_policy_iocost = { | |
3324 | .dfl_cftypes = ioc_files, | |
3325 | .cpd_alloc_fn = ioc_cpd_alloc, | |
3326 | .cpd_free_fn = ioc_cpd_free, | |
3327 | .pd_alloc_fn = ioc_pd_alloc, | |
3328 | .pd_init_fn = ioc_pd_init, | |
3329 | .pd_free_fn = ioc_pd_free, | |
97eb1975 | 3330 | .pd_stat_fn = ioc_pd_stat, |
7caa4715 TH |
3331 | }; |
3332 | ||
3333 | static int __init ioc_init(void) | |
3334 | { | |
3335 | return blkcg_policy_register(&blkcg_policy_iocost); | |
3336 | } | |
3337 | ||
3338 | static void __exit ioc_exit(void) | |
3339 | { | |
3340 | return blkcg_policy_unregister(&blkcg_policy_iocost); | |
3341 | } | |
3342 | ||
3343 | module_init(ioc_init); | |
3344 | module_exit(ioc_exit); |