// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};
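
/*
 * Roughly, judging from where these bits are set and cleared below:
 * "waiting" marks a group that has queued requests but is not in
 * service, "idling" marks a group on whose behalf the device is
 * idling, and "empty" marks a group with no queued requests. The
 * helpers generated by BFQG_FLAG_FNS below set, clear and test these
 * bits in the flags field of struct bfqg_stats.
 */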

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS
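
/*
 * For reference, BFQG_FLAG_FNS(waiting) above expands to three
 * helpers: bfqg_stats_mark_waiting() and bfqg_stats_clear_waiting(),
 * which set and clear bit BFQG_stats_waiting in stats->flags, and
 * bfqg_stats_waiting(), which tests it.
 */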

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	blkg_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq
	 * got a new request in the parent group and moved to this
	 * group while being added to the service tree. Just ignore
	 * the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}
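
/*
 * No running average is stored directly: bfqg_prfill_avg_queue_size()
 * further below derives bfq.avg_queue_size on demand as
 * avg_queue_size_sum / avg_queue_size_samples.
 */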

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time_ns - start_time_ns);
}
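
/*
 * Of the two deltas above, io_start_time_ns - start_time_ns is the
 * time the request spent queued in the scheduler (accounted as
 * wait_time), while now - io_start_time_ns is the time it spent in
 * the driver and on the device (accounted as service_time).
 */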

#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing one to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}
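
/*
 * Note that bfqg->ref above is a plain int updated without atomics:
 * these helpers appear to rely on the callers serializing them (see
 * the comments in bfq_bic_update_cgroup on how bfqg lifetime is
 * managed), rather than on a refcount_t.
 */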

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	blkg_stat_exit(&stats->time);
	blkg_stat_exit(&stats->avg_queue_size_sum);
	blkg_stat_exit(&stats->avg_queue_size_samples);
	blkg_stat_exit(&stats->dequeue);
	blkg_stat_exit(&stats->group_wait_time);
	blkg_stat_exit(&stats->idle_time);
	blkg_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    blkg_stat_init(&stats->time, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    blkg_stat_init(&stats->dequeue, gfp) ||
	    blkg_stat_init(&stats->group_wait_time, gfp) ||
	    blkg_stat_init(&stats->idle_time, gfp) ||
	    blkg_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}
#endif

	return 0;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}
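
/*
 * The two defaults above happen to share the same numeric value (both
 * are 100 in the kernels this file targets), but they belong to
 * different scales: CGROUP_WEIGHT_DFL sits in
 * [CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX] on the unified hierarchy,
 * while BFQ_WEIGHT_LEGACY_DFL sits in [BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT]
 * on the legacy one.
 */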

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		bfqg = container_of(entity, struct bfq_group, entity);
		if (bfqg != bfqd->root_group) {
			parent = bfqg_parent(bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(bfqg, parent);
		}
	}

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/*
	 * If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and to get a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						 struct bfq_io_cq *bic,
						 struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "bic_change_group: %p %d",
				     async_bfqq, async_bfqq->ref);
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = __bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
					 struct bfq_group *bfqg,
					 struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity ; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues belonging
		 * to exited tasks because they never migrated to a
		 * different cgroup from the one being destroyed now.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_entities(bfqd, bfqg, st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (!bfqg)
			continue;
		/*
		 * Setting the prio_changed flag of the entity
		 * to 1 with new_weight == weight would re-set
		 * the value of the weight to its ioprio mapping.
		 * Set the flag only if necessary.
		 */
		if ((unsigned short)val != bfqg->entity.new_weight) {
			bfqg->entity.new_weight = (unsigned short)val;
			/*
			 * Make sure that the above new value has been
			 * stored in bfqg->entity.new_weight before
			 * setting the prio_changed flag. In fact,
			 * this flag may be read asynchronously (in
			 * critical sections protected by a different
			 * lock than that held here), and finding this
			 * flag set may cause the execution of the code
			 * for updating parameters whose value may
			 * depend also on bfqg->entity.new_weight (in
			 * __bfq_entity_update_weight_prio).
			 * This barrier makes sure that the new value
			 * of bfqg->entity.new_weight is correctly
			 * seen in that code.
			 */
			smp_wmb();
			bfqg->entity.prio_changed = 1;
		}
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	u64 weight;
	/* First unsigned long found in the file is used */
	int ret = kstrtoull(strim(buf), 0, &weight);

	if (ret)
		return ret;

	ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
	return ret ?: nbytes;
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
					  &blkcg_policy_bfq, off);
	return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
							   &blkcg_policy_bfq,
							   off);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_DEBUG_BLK_CGROUP */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}
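
/*
 * blkcg_activate_policy() walks the blkgs already attached to the
 * queue and allocates and initializes a policy data (via bfq_pd_alloc()
 * and bfq_pd_init() above) for each of them, including the root blkg
 * whose bfq_group is returned here.
 */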

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};
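
/*
 * Of the two cftype arrays wired up above, bfq_blkcg_legacy_files is
 * used on the legacy (v1) cgroup hierarchy and bfq_blkg_files on the
 * unified (v2) hierarchy, where only "bfq.weight" is exposed.
 */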

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write_u64 = bfq_io_set_weight_legacy,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "bfq.io_serviced",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_DEBUG_BLK_CGROUP */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios_recursive,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif /* CONFIG_DEBUG_BLK_CGROUP */
	{ } /* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */