/*
 * cgroups support for the BFQ I/O scheduler.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"
#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}
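
/*
 * For instance, BFQG_FLAG_FNS(waiting) below expands to the helpers
 * bfqg_stats_mark_waiting(), bfqg_stats_clear_waiting() and
 * bfqg_stats_waiting(), which respectively set, clear and test the
 * BFQG_stats_waiting bit in stats->flags.
 */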
BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	unsigned long long now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	unsigned long long now;

	if (!bfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}
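
/* Account one more dequeue event for @bfqg. */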
void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	blkg_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq
	 * got a new request in the parent group and moved to this
	 * group while being added to the service tree. Just ignore
	 * the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = sched_clock();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}
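
/*
 * Account a newly queued request: bump the queued rwstat for the
 * request's operation type, end any current empty period and, if the
 * request does not belong to the in-service queue, start measuring how
 * long @bfqq's group has to wait before being served.
 */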
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
	blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}
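
/*
 * On completion, charge the interval from dispatch (@io_start_time) to
 * now as service time, and the interval from arrival (@start_time) to
 * dispatch as wait time, for the request's operation type.
 */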
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
				  uint64_t io_start_time, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, op,
				now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, op,
				io_start_time - start_time);
}

#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, uint64_t start_time,
				  uint64_t io_start_time, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by making it possible to find the parent of a bfq_group or the
 * bfq_group associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	bfqg_put(bfqg);

	blkg_put(bfqg_to_blkg(bfqg));
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* queued stats shouldn't be merged */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	blkg_stat_add_aux(&to->time, &from->time);
	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_add_aux(&to->avg_queue_size_samples,
			  &from->avg_queue_size_samples);
	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	blkg_stat_exit(&stats->time);
	blkg_stat_exit(&stats->avg_queue_size_sum);
	blkg_stat_exit(&stats->avg_queue_size_samples);
	blkg_stat_exit(&stats->dequeue);
	blkg_stat_exit(&stats->group_wait_time);
	blkg_stat_exit(&stats->idle_time);
	blkg_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    blkg_stat_init(&stats->time, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    blkg_stat_init(&stats->dequeue, gfp) ||
	    blkg_stat_init(&stats->group_wait_time, gfp) ||
	    blkg_stat_init(&stats->idle_time, gfp) ||
	    blkg_stat_init(&stats->empty_time, gfp)) {
		bfqg_stats_exit(stats);
		return -ENOMEM;
	}
#endif

	return 0;
}
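
/*
 * blkcg policy data (cpd) is allocated once per blkcg, while blkg
 * policy data (pd) is allocated per (blkcg, request_queue) pair; the
 * helpers below convert between these generic blk-cgroup objects and
 * the corresponding BFQ-specific ones.
 */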
static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	/* the root_group's my_entity will be set to NULL in bfq_init_queue() */
	bfqg->my_entity = entity;
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
					 struct blkcg *blkcg)
{
	struct blkcg_gq *blkg;

	blkg = blkg_lookup(blkcg, bfqd->queue);
	if (likely(blkg))
		return blkg_to_bfqg(blkg);
	return NULL;
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
				     struct blkcg *blkcg)
{
	struct bfq_group *bfqg, *parent;
	struct bfq_entity *entity;

	bfqg = bfq_lookup_bfqg(bfqd, blkcg);

	if (unlikely(!bfqg))
		return NULL;

	/*
	 * Update the chain of bfq_groups, as we might be handling a
	 * leaf group which, along with some of its relatives, has not
	 * yet been hooked to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		bfqg = container_of(entity, struct bfq_group, entity);
		if (bfqg != bfqd->root_group) {
			parent = bfqg_parent(bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(bfqg, parent);
		}
	}

	return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one.  Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;

	/*
	 * If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from the data structures related to the current group.
	 * Otherwise we need to remove bfqq explicitly with
	 * bfq_deactivate_bfqq, as we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(bfqq_group(bfqq));

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (bfq_bfqq_busy(bfqq)) {
		bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and to get a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
						 struct bfq_io_cq *bic,
						 struct blkcg *blkcg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
	struct bfq_group *bfqg;
	struct bfq_entity *entity;

	bfqg = bfq_find_set_group(bfqd, blkcg);

	if (unlikely(!bfqg))
		bfqg = bfqd->root_group;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, 0);
			bfq_log_bfqq(bfqd, async_bfqq,
				     "bic_change_group: %p %d",
				     async_bfqq, async_bfqq->ref);
			bfq_put_queue(async_bfqq);
		}
	}

	if (sync_bfqq) {
		entity = &sync_bfqq->entity;
		if (entity->sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
	}

	return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = NULL;
	uint64_t serial_nr;

	rcu_read_lock();
	serial_nr = bio_blkcg(bio)->css.serial_nr;

	/*
	 * Check whether blkcg has changed.  The condition may trigger
	 * spuriously on a newly created cic, but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		goto out;

	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
out:
	rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 *				  entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
					 struct bfq_group *bfqg,
					 struct bfq_service_tree *st)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity = NULL;

	if (!RB_EMPTY_ROOT(&st->active))
		entity = bfq_entity_of(rb_first(active));

	for (; entity ; entity = bfq_entity_of(rb_first(active)))
		bfq_reparent_leaf_entity(bfqd, entity);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic.
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited tasks, because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now.
		 */
		bfq_flush_idle_tree(st);

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_entities(bfqd, bfqg, st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum().  Transfer stats to the parent so
	 * that they don't get lost.  If IOs complete after this point, the
	 * stats for them will be lost.  Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}
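
/*
 * Set the weight of all the bfq_groups of @css's blkcg through the
 * legacy (cgroup-v1) interface.  Out-of-range values are rejected with
 * -ERANGE; the entities themselves pick up the new weight lazily, via
 * their prio_changed flag (see __bfq_entity_update_weight_prio).
 */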
static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (!bfqg)
			continue;
		/*
		 * Setting the prio_changed flag of the entity
		 * to 1 with new_weight == weight would re-set
		 * the value of the weight to its ioprio mapping.
		 * Set the flag only if necessary.
		 */
		if ((unsigned short)val != bfqg->entity.new_weight) {
			bfqg->entity.new_weight = (unsigned short)val;
			/*
			 * Make sure that the above new value has been
			 * stored in bfqg->entity.new_weight before
			 * setting the prio_changed flag. In fact,
			 * this flag may be read asynchronously (in
			 * critical sections protected by a different
			 * lock than that held here), and finding this
			 * flag set may cause the execution of the code
			 * for updating parameters whose value may
			 * depend also on bfqg->entity.new_weight (in
			 * __bfq_entity_update_weight_prio).
			 * This barrier makes sure that the new value
			 * of bfqg->entity.new_weight is correctly
			 * seen in that code.
			 */
			smp_wmb();
			bfqg->entity.prio_changed = 1;
		}
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	u64 weight;
	/* First unsigned long found in the file is used */
	int ret = kstrtoull(strim(buf), 0, &weight);

	if (ret)
		return ret;

	return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
}
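
/*
 * For example (assuming a cgroup-v2 hierarchy mounted at
 * /sys/fs/cgroup, with BFQ in use on the relevant device), the weight
 * handled above can be changed from user space with something like:
 *
 *	echo 200 > /sys/fs/cgroup/<group>/io.bfq.weight
 */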
#ifdef CONFIG_DEBUG_BLK_CGROUP
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
					  &blkcg_policy_bfq, off);
	return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
							   &blkcg_policy_bfq,
							   off);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9); /* bytes to 512B sectors */
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
	u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

	return __blkg_prfill_u64(sf, pd, sum >> 9); /* bytes to 512B sectors */
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_DEBUG_BLK_CGROUP */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write_u64 = bfq_io_set_weight_legacy,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes,
	},
	{
		.name = "bfq.io_serviced",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_DEBUG_BLK_CGROUP */

	/* the same statistics, covering the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_bytes_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = (unsigned long)&blkcg_policy_bfq,
		.seq_show = blkg_print_stat_ios_recursive,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif /* CONFIG_DEBUG_BLK_CGROUP */
	{ }	/* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{ }	/* terminate */
};
#else /* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */