// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * This works similar to wbt with a few exceptions:
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following:
 *
 *                   root blkg
 *             /                     \
 *        fast (target=5ms)     slow (target=10ms)
 *         /     \                  /        \
 *       a        b          normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight.  This starts at UINT_MAX down
 * to 1.  If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion.  So think
 * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more, then we induce a latency at userspace return.  We accumulate the
 * total amount of time we need to be punished by doing
 *
 *   total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 *   throttle_time = min(total_time, NSEC_PER_SEC)
 *
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk-cgroup.h"
#include "blk.h"

#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
        struct rq_qos rqos;
        struct timer_list timer;

        /*
         * ->enabled is the master enable switch gating the throttling logic
         * and inflight tracking.  The number of cgroups which have iolat
         * enabled is tracked in ->enable_cnt, and ->enabled is flipped on/off
         * accordingly from ->enable_work with the request_queue frozen.  For
         * details, see blkiolatency_enable_work_fn().
         */
        bool enabled;
        atomic_t enable_cnt;
        struct work_struct enable_work;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
        return container_of(rqos, struct blk_iolatency, rqos);
}

struct child_latency_info {
        spinlock_t lock;

        /* Last time we adjusted the scale of everybody. */
        u64 last_scale_event;

        /* The latency that we missed. */
        u64 scale_lat;

        /* Total io's from all of our children for the last summation. */
        u64 nr_samples;

        /* The guy who actually changed the latency numbers. */
        struct iolatency_grp *scale_grp;

        /* Cookie to tell if we need to scale up or down. */
        atomic_t scale_cookie;
};

struct percentile_stats {
        u64 total;
        u64 missed;
};

struct latency_stat {
        union {
                struct percentile_stats ps;
                struct blk_rq_stat rqs;
        };
};

struct iolatency_grp {
        struct blkg_policy_data pd;
        struct latency_stat __percpu *stats;
        struct latency_stat cur_stat;
        struct blk_iolatency *blkiolat;
        unsigned int max_depth;
        struct rq_wait rq_wait;
        atomic64_t window_start;
        atomic_t scale_cookie;
        u64 min_lat_nsec;
        u64 cur_win_nsec;

        /* total running average of our io latency. */
        u64 lat_avg;

        /* Our current number of IO's for the last summation. */
        u64 nr_samples;

        bool ssd;
        struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
                                      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
        2045, // exp(1/600) - 600 samples
        2039, // exp(1/240) - 240 samples
        2031, // exp(1/120) - 120 samples
        2023, // exp(1/80) - 80 samples
        2014, // exp(1/60) - 60 samples
};

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
        return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
        return pd_to_blkg(&iolat->pd);
}

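/*
 * Stats are kept in one of two forms depending on the device: on SSDs we
 * count total IOs vs. IOs that missed their latency target, on rotational
 * devices we track the mean request time via blk_rq_stat.
 */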
static inline void latency_stat_init(struct iolatency_grp *iolat,
                                     struct latency_stat *stat)
{
        if (iolat->ssd) {
                stat->ps.total = 0;
                stat->ps.missed = 0;
        } else
                blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
                                    struct latency_stat *sum,
                                    struct latency_stat *stat)
{
        if (iolat->ssd) {
                sum->ps.total += stat->ps.total;
                sum->ps.missed += stat->ps.missed;
        } else
                blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
                                            u64 req_time)
{
        struct latency_stat *stat = get_cpu_ptr(iolat->stats);
        if (iolat->ssd) {
                if (req_time >= iolat->min_lat_nsec)
                        stat->ps.missed++;
                stat->ps.total++;
        } else
                blk_rq_stat_add(&stat->rqs, req_time);
        put_cpu_ptr(stat);
}

static inline bool latency_sum_ok(struct iolatency_grp *iolat,
                                  struct latency_stat *stat)
{
        if (iolat->ssd) {
                u64 thresh = div64_u64(stat->ps.total, 10);
                thresh = max(thresh, 1ULL);
                return stat->ps.missed < thresh;
        }
        return stat->rqs.mean <= iolat->min_lat_nsec;
}

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
                                       struct latency_stat *stat)
{
        if (iolat->ssd)
                return stat->ps.total;
        return stat->rqs.nr_samples;
}

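/*
 * Fold the most recent window's mean latency into the running lat_avg.
 * This only applies to rotational devices; SSDs are judged purely on the
 * percentile stats above.
 */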
static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
                                              struct latency_stat *stat)
{
        int exp_idx;

        if (iolat->ssd)
                return;

        /*
         * calc_load() takes in a number stored in fixed point representation.
         * Because we are using this for IO time in ns, the values stored
         * are significantly larger than the FIXED_1 denominator (2048).
         * Therefore, rounding errors in the calculation are negligible and
         * can be ignored.
         */
        exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                        div64_u64(iolat->cur_win_nsec,
                                  BLKIOLATENCY_EXP_BUCKET_SIZE));
        iolat->lat_avg = calc_load(iolat->lat_avg,
                                   iolatency_exp_factors[exp_idx],
                                   stat->rqs.mean);
}

static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        atomic_dec(&rqw->inflight);
        wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
        struct iolatency_grp *iolat = private_data;
        return rq_wait_inc_below(rqw, iolat->max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                       struct iolatency_grp *iolat,
                                       bool issue_as_root,
                                       bool use_memdelay)
{
        struct rq_wait *rqw = &iolat->rq_wait;
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

        if (use_delay)
                blkcg_schedule_throttle(rqos->disk, use_memdelay);

        /*
         * To avoid priority inversions we want to just take a slot if we are
         * issuing as root.  If we're being killed off there's no point in
         * delaying things, we may have been killed by OOM so throttling may
         * make recovery take even longer, so just let the IO's through so the
         * task can go away.
         */
        if (issue_as_root || fatal_signal_pending(current)) {
                atomic_inc(&rqw->inflight);
                return;
        }

        rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

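/*
 * Step size for a scaling event: 1/16th of the queue depth when scaling
 * up, 1/4 when scaling down, and always at least 1.
 */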
static inline unsigned long scale_amount(unsigned long qd, bool up)
{
        return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down it knows which way it needs to go
 * based on its last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
                                struct child_latency_info *lat_info,
                                bool up)
{
        unsigned long qd = blkiolat->rqos.disk->queue->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = atomic_read(&lat_info->scale_cookie);
        unsigned long max_scale = qd << 1;
        unsigned long diff = 0;

        if (old < DEFAULT_SCALE_COOKIE)
                diff = DEFAULT_SCALE_COOKIE - old;

        if (up) {
                if (scale + old > DEFAULT_SCALE_COOKIE)
                        atomic_set(&lat_info->scale_cookie,
                                   DEFAULT_SCALE_COOKIE);
                else if (diff > qd)
                        atomic_inc(&lat_info->scale_cookie);
                else
                        atomic_add(scale, &lat_info->scale_cookie);
        } else {
                /*
                 * We don't want to dig a hole so deep that it takes us hours
                 * to dig out of it.  Just enough that we don't throttle/
                 * unthrottle with jagged workloads but can still unthrottle
                 * once pressure has sufficiently dissipated.
                 */
                if (diff > qd) {
                        if (diff < max_scale)
                                atomic_dec(&lat_info->scale_cookie);
                } else {
                        atomic_sub(scale, &lat_info->scale_cookie);
                }
        }
}

/*
 * Change the queue depth of the iolatency_grp.  We add 1/16th of the
 * queue depth at a time when scaling up, so we don't get wild swings and
 * hopefully dial in to a fairer distribution of the overall queue depth,
 * and we halve the queue depth at a time when scaling down, so we can move
 * quickly from the default unlimited depth to the target.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
        unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = iolat->max_depth;

        if (old > qd)
                old = qd;

        if (up) {
                if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
                        return;

                if (old < qd) {
                        old += scale;
                        old = min(old, qd);
                        iolat->max_depth = old;
                        wake_up_all(&iolat->rq_wait.wait);
                }
        } else {
                old >>= 1;
                iolat->max_depth = max(old, 1UL);
        }
}

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        unsigned int cur_cookie;
        unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
        u64 scale_lat;
        int direction = 0;

        parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;
        cur_cookie = atomic_read(&lat_info->scale_cookie);
        scale_lat = READ_ONCE(lat_info->scale_lat);

        if (cur_cookie < our_cookie)
                direction = -1;
        else if (cur_cookie > our_cookie)
                direction = 1;
        else
                return;

        if (!atomic_try_cmpxchg(&iolat->scale_cookie, &our_cookie, cur_cookie)) {
                /* Somebody beat us to the punch, just bail. */
                return;
        }

        if (direction < 0 && iolat->min_lat_nsec) {
                u64 samples_thresh;

                if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                        return;

                /*
                 * Sometimes high priority groups are their own worst enemy, so
                 * instead of taking it out on some poor other group that did
                 * 5% or less of the IO's for the last summation just skip this
                 * scale down event.
                 */
                samples_thresh = lat_info->nr_samples * 5;
                samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
                if (iolat->nr_samples <= samples_thresh)
                        return;
        }

        /* We're as low as we can go. */
        if (iolat->max_depth == 1 && direction < 0) {
                blkcg_use_delay(lat_to_blkg(iolat));
                return;
        }

        /* We're back to the default cookie, unthrottle all the things. */
        if (cur_cookie == DEFAULT_SCALE_COOKIE) {
                blkcg_clear_delay(lat_to_blkg(iolat));
                iolat->max_depth = UINT_MAX;
                wake_up_all(&iolat->rq_wait.wait);
                return;
        }

        scale_change(iolat, direction > 0);
}

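/*
 * rq_qos throttle hook.  Walk every iolatency-enabled ancestor, pick up
 * any scale cookie change, and take (or wait for) an inflight slot at
 * each level.
 */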
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);

        if (!blkiolat->enabled)
                return;

        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }

                check_scale_change(iolat);
                __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                           (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                blkg = blkg->parent;
        }
        if (!timer_pending(&blkiolat->timer))
                mod_timer(&blkiolat->timer, jiffies + HZ);
}

static void iolatency_record_time(struct iolatency_grp *iolat,
                                  struct bio_issue *issue, u64 now,
                                  bool issue_as_root)
{
        u64 start = bio_issue_time(issue);
        u64 req_time;

        /*
         * Truncate "now" the same way the issue time was truncated so the
         * two timestamps are comparable.
         */
        now = __bio_issue_time(now);

        if (now <= start)
                return;

        req_time = now - start;

        /*
         * We don't want to count issue_as_root bio's in the cgroups latency
         * statistics as it could skew the numbers downwards.
         */
        if (unlikely(issue_as_root && iolat->max_depth != UINT_MAX)) {
                u64 sub = iolat->min_lat_nsec;
                if (req_time < sub)
                        blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
                return;
        }

        latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

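/*
 * Called when a latency window elapses.  Drain this group's percpu stats
 * into the parent's child_latency_info and, if enough time has passed
 * since the last scale event, decide whether the scale cookie should move
 * up or down.
 */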
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        struct latency_stat stat;
        unsigned long flags;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
                latency_stat_init(iolat, s);
        }
        preempt_enable();

        parent = blkg_to_lat(blkg->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;

        iolat_update_total_lat_avg(iolat, &stat);

        /* Everything is ok and we don't need to adjust the scale. */
        if (latency_sum_ok(iolat, &stat) &&
            atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
                return;

        /* Somebody beat us to the punch, just bail. */
        spin_lock_irqsave(&lat_info->lock, flags);

        latency_stat_sum(iolat, &iolat->cur_stat, &stat);
        lat_info->nr_samples -= iolat->nr_samples;
        lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
        iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

        if ((lat_info->last_scale_event >= now ||
            now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
                goto out;

        if (latency_sum_ok(iolat, &iolat->cur_stat) &&
            latency_sum_ok(iolat, &stat)) {
                if (latency_stat_samples(iolat, &iolat->cur_stat) <
                    BLKIOLATENCY_MIN_GOOD_SAMPLES)
                        goto out;
                if (lat_info->scale_grp == iolat) {
                        lat_info->last_scale_event = now;
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                }
        } else if (lat_info->scale_lat == 0 ||
                   lat_info->scale_lat >= iolat->min_lat_nsec) {
                lat_info->last_scale_event = now;
                if (!lat_info->scale_grp ||
                    lat_info->scale_lat > iolat->min_lat_nsec) {
                        WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                        lat_info->scale_grp = iolat;
                }
                scale_cookie_change(iolat->blkiolat, lat_info, false);
        }
        latency_stat_init(iolat, &iolat->cur_stat);
out:
        spin_unlock_irqrestore(&lat_info->lock, flags);
}

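/*
 * rq_qos completion hook.  Release the inflight slot at every level,
 * record the bio's latency, and run the window check once cur_win_nsec
 * has elapsed.
 */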
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;
        struct rq_wait *rqw;
        struct iolatency_grp *iolat;
        u64 window_start;
        u64 now;
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        int inflight = 0;

        blkg = bio->bi_blkg;
        if (!blkg || !bio_flagged(bio, BIO_QOS_THROTTLED))
                return;

        iolat = blkg_to_lat(bio->bi_blkg);
        if (!iolat)
                return;

        if (!iolat->blkiolat->enabled)
                return;

        now = ktime_to_ns(ktime_get());
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }
                rqw = &iolat->rq_wait;

                inflight = atomic_dec_return(&rqw->inflight);
                WARN_ON_ONCE(inflight < 0);
                /*
                 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
                 * submitted, so do not account for it.
                 */
                if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
                        iolatency_record_time(iolat, &bio->bi_issue, now,
                                              issue_as_root);
                        window_start = atomic64_read(&iolat->window_start);
                        if (now > window_start &&
                            (now - window_start) >= iolat->cur_win_nsec) {
                                if (atomic64_try_cmpxchg(&iolat->window_start,
                                                &window_start, now))
                                        iolatency_check_latencies(iolat, now);
                        }
                }
                wake_up(&rqw->wait);
                blkg = blkg->parent;
        }
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

        timer_shutdown_sync(&blkiolat->timer);
        flush_work(&blkiolat->enable_work);
        blkcg_deactivate_policy(rqos->disk, &blkcg_policy_iolatency);
        kfree(blkiolat);
}

static const struct rq_qos_ops blkcg_iolatency_ops = {
        .throttle = blkcg_iolatency_throttle,
        .done_bio = blkcg_iolatency_done_bio,
        .exit = blkcg_iolatency_exit,
};

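/*
 * Runs up to once a second while IO is flowing.  This lets groups that
 * were scaled down recover even if the group that caused the scale down
 * has since gone idle.
 */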
static void blkiolatency_timer_fn(struct timer_list *t)
{
        struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        u64 now = ktime_to_ns(ktime_get());

        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css,
                                     blkiolat->rqos.disk->queue->root_blkg) {
                struct iolatency_grp *iolat;
                struct child_latency_info *lat_info;
                unsigned long flags;
                u64 cookie;

                /*
                 * We could be exiting, don't access the pd unless we have a
                 * ref on the blkg.
                 */
                if (!blkg_tryget(blkg))
                        continue;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                lat_info = &iolat->child_lat;
                cookie = atomic_read(&lat_info->scale_cookie);

                if (cookie >= DEFAULT_SCALE_COOKIE)
                        goto next;

                spin_lock_irqsave(&lat_info->lock, flags);
                if (lat_info->last_scale_event >= now)
                        goto next_lock;

                /*
                 * We scaled down but don't have a scale_grp, scale up and
                 * carry on.
                 */
                if (lat_info->scale_grp == NULL) {
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                        goto next_lock;
                }

                /*
                 * It's been 5 seconds since our last scale event, clear the
                 * scale grp in case the group that needed the scale down isn't
                 * doing any IO currently.
                 */
                if (now - lat_info->last_scale_event >=
                    ((u64)NSEC_PER_SEC * 5))
                        lat_info->scale_grp = NULL;
next_lock:
                spin_unlock_irqrestore(&lat_info->lock, flags);
next:
                blkg_put(blkg);
        }
        rcu_read_unlock();
}

/**
 * blkiolatency_enable_work_fn - Enable or disable iolatency on the device
 * @work: enable_work of the blk_iolatency of interest
 *
 * iolatency needs to keep track of the number of in-flight IOs per cgroup. This
 * is relatively expensive as it involves walking up the hierarchy twice for
 * every IO. Thus, if iolatency is not enabled in any cgroup for the device, we
 * want to disable the in-flight tracking.
 *
 * We have to make sure that the counting is balanced - we don't want to leak
 * the in-flight counts by disabling accounting in the completion path while IOs
 * are in flight. This is achieved by ensuring that no IO is in flight by
 * freezing the queue while flipping ->enabled. As this requires a sleepable
 * context, ->enabled flipping is punted to this work function.
 */
static void blkiolatency_enable_work_fn(struct work_struct *work)
{
        struct blk_iolatency *blkiolat = container_of(work, struct blk_iolatency,
                                                      enable_work);
        bool enabled;

        /*
         * There can only be one instance of this function running for
         * @blkiolat and it's guaranteed to be executed at least once after the
         * latest ->enable_cnt modification.  Acting on the latest ->enable_cnt
         * is sufficient.
         *
         * Also, we know @blkiolat is safe to access as ->enable_work is
         * flushed in blkcg_iolatency_exit().
         */
        enabled = atomic_read(&blkiolat->enable_cnt);
        if (enabled != blkiolat->enabled) {
                blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
                blkiolat->enabled = enabled;
                blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue);
        }
}

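/*
 * Allocated lazily: this is only called the first time a latency target
 * is set on the device (see iolatency_set_limit()).
 */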
static int blk_iolatency_init(struct gendisk *disk)
{
        struct blk_iolatency *blkiolat;
        int ret;

        blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
        if (!blkiolat)
                return -ENOMEM;

        ret = rq_qos_add(&blkiolat->rqos, disk, RQ_QOS_LATENCY,
                         &blkcg_iolatency_ops);
        if (ret)
                goto err_free;
        ret = blkcg_activate_policy(disk, &blkcg_policy_iolatency);
        if (ret)
                goto err_qos_del;

        timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);
        INIT_WORK(&blkiolat->enable_work, blkiolatency_enable_work_fn);

        return 0;

err_qos_del:
        rq_qos_del(&blkiolat->rqos);
err_free:
        kfree(blkiolat);
        return ret;
}

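/*
 * Setting the first non-zero target on a device switches throttling on
 * (via ->enable_cnt and ->enable_work); clearing the last one switches it
 * back off.  The sampling window is 16x the target, clamped to
 * [BLKIOLATENCY_MIN_WIN_SIZE, BLKIOLATENCY_MAX_WIN_SIZE].
 */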
static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
        struct blk_iolatency *blkiolat = iolat->blkiolat;
        u64 oldval = iolat->min_lat_nsec;

        iolat->min_lat_nsec = val;
        iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
        iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                    BLKIOLATENCY_MAX_WIN_SIZE);

        if (!oldval && val) {
                if (atomic_inc_return(&blkiolat->enable_cnt) == 1)
                        schedule_work(&blkiolat->enable_work);
        }
        if (oldval && !val) {
                blkcg_clear_delay(blkg);
                if (atomic_dec_return(&blkiolat->enable_cnt) == 0)
                        schedule_work(&blkiolat->enable_work);
        }
}

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
        if (blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
                struct child_latency_info *lat_info;
                if (!iolat)
                        return;

                lat_info = &iolat->child_lat;
                spin_lock(&lat_info->lock);
                atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
                lat_info->last_scale_event = 0;
                lat_info->scale_grp = NULL;
                lat_info->scale_lat = 0;
                spin_unlock(&lat_info->lock);
        }
}

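/*
 * Parse a line written to the cgroup2 io.latency file.  The body is
 * "MAJ:MIN target=<usec>" (the device part is handled by blkg_conf), so
 * for example writing "8:16 target=10000" sets a 10ms latency target on
 * device 8:16, and "8:16 target=max" clears it.  (The 8:16 device numbers
 * here are purely illustrative.)
 */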
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
                                   size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkcg_gq *blkg;
        struct blkg_conf_ctx ctx;
        struct iolatency_grp *iolat;
        char *p, *tok;
        u64 lat_val = 0;
        u64 oldval;
        int ret;

        blkg_conf_init(&ctx, buf);

        ret = blkg_conf_open_bdev(&ctx);
        if (ret)
                goto out;

        /*
         * blk_iolatency_init() may fail after rq_qos_add() succeeds, which can
         * confuse the iolat_rq_qos() test.  Make the test and init atomic.
         */
        lockdep_assert_held(&ctx.bdev->bd_queue->rq_qos_mutex);
        if (!iolat_rq_qos(ctx.bdev->bd_queue))
                ret = blk_iolatency_init(ctx.bdev->bd_disk);
        if (ret)
                goto out;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, &ctx);
        if (ret)
                goto out;

        iolat = blkg_to_lat(ctx.blkg);
        p = ctx.body;

        ret = -EINVAL;
        while ((tok = strsep(&p, " "))) {
                char key[16];
                char val[21];   /* 18446744073709551615 (U64_MAX) */

                if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
                        goto out;

                if (!strcmp(key, "target")) {
                        u64 v;

                        if (!strcmp(val, "max"))
                                lat_val = 0;
                        else if (sscanf(val, "%llu", &v) == 1)
                                lat_val = v * NSEC_PER_USEC;
                        else
                                goto out;
                } else {
                        goto out;
                }
        }

        /* Walk up the tree to see if our new val is lower than it should be. */
        blkg = ctx.blkg;
        oldval = iolat->min_lat_nsec;

        iolatency_set_min_lat_nsec(blkg, lat_val);
        if (oldval != iolat->min_lat_nsec)
                iolatency_clear_scaling(blkg);
        ret = 0;
out:
        blkg_conf_exit(&ctx);
        return ret ?: nbytes;
}

static u64 iolatency_prfill_limit(struct seq_file *sf,
                                  struct blkg_policy_data *pd, int off)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname || !iolat->min_lat_nsec)
                return 0;
        seq_printf(sf, "%s target=%llu\n",
                   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
        return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          iolatency_prfill_limit,
                          &blkcg_policy_iolatency, seq_cft(sf)->private, false);
        return 0;
}

static void iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
{
        struct latency_stat stat;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *ls;
                ls = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, ls);
        }
        preempt_enable();

        if (iolat->max_depth == UINT_MAX)
                seq_printf(s, " missed=%llu total=%llu depth=max",
                           (unsigned long long)stat.ps.missed,
                           (unsigned long long)stat.ps.total);
        else
                seq_printf(s, " missed=%llu total=%llu depth=%u",
                           (unsigned long long)stat.ps.missed,
                           (unsigned long long)stat.ps.total,
                           iolat->max_depth);
}

static void iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat;
        unsigned long long cur_win;

        if (!blkcg_debug_stats)
                return;

        if (iolat->ssd)
                return iolatency_ssd_stat(iolat, s);

        avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
        if (iolat->max_depth == UINT_MAX)
                seq_printf(s, " depth=max avg_lat=%llu win=%llu",
                           avg_lat, cur_win);
        else
                seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
                           iolat->max_depth, avg_lat, cur_win);
}

static struct blkg_policy_data *iolatency_pd_alloc(struct gendisk *disk,
                                                   struct blkcg *blkcg,
                                                   gfp_t gfp)
{
        struct iolatency_grp *iolat;

        iolat = kzalloc_node(sizeof(*iolat), gfp, disk->node_id);
        if (!iolat)
                return NULL;
        iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
                                          __alignof__(struct latency_stat),
                                          gfp);
        if (!iolat->stats) {
                kfree(iolat);
                return NULL;
        }
        return &iolat->pd;
}

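/*
 * Per-cgroup init: pick the SSD vs. rotational stats mode from the queue,
 * start fully unthrottled (max_depth == UINT_MAX), and inherit the
 * parent's scale cookie in case scaling is already in progress.
 */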
static void iolatency_pd_init(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct rq_qos *rqos = iolat_rq_qos(blkg->q);
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        u64 now = ktime_to_ns(ktime_get());
        int cpu;

        if (blk_queue_nonrot(blkg->q))
                iolat->ssd = true;
        else
                iolat->ssd = false;

        for_each_possible_cpu(cpu) {
                struct latency_stat *stat;
                stat = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_init(iolat, stat);
        }

        latency_stat_init(iolat, &iolat->cur_stat);
        rq_wait_init(&iolat->rq_wait);
        spin_lock_init(&iolat->child_lat.lock);
        iolat->max_depth = UINT_MAX;
        iolat->blkiolat = blkiolat;
        iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
        atomic64_set(&iolat->window_start, now);

        /*
         * We init things in list order, so the pd for the parent may not be
         * init'ed yet for whatever reason.
         */
        if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
                struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
                atomic_set(&iolat->scale_cookie,
                           atomic_read(&parent->child_lat.scale_cookie));
        } else {
                atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
        }

        atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);

        iolatency_set_min_lat_nsec(blkg, 0);
        iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        free_percpu(iolat->stats);
        kfree(iolat);
}

static struct cftype iolatency_files[] = {
        {
                .name = "latency",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = iolatency_print_limit,
                .write = iolatency_set_limit,
        },
        {}
};

static struct blkcg_policy blkcg_policy_iolatency = {
        .dfl_cftypes    = iolatency_files,
        .pd_alloc_fn    = iolatency_pd_alloc,
        .pd_init_fn     = iolatency_pd_init,
        .pd_offline_fn  = iolatency_pd_offline,
        .pd_free_fn     = iolatency_pd_free,
        .pd_stat_fn     = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
        return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
        blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);