block/bfq-cgroup.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "elevator.h"
#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * do not re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

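/*
 * Note: a bfq_stat is a two-level counter. cpu_cnt holds the live
 * per-CPU count, while aux_cnt accumulates, as a plain atomic64, the
 * final counts of groups that have gone offline (see
 * bfqg_stats_xfer_dead() below). Recursive readers sum both, so
 * per-cgroup statistics survive the destruction of child groups.
 */
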
/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

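/*
 * For illustration, BFQG_FLAG_FNS(waiting) above expands (modulo
 * whitespace) to:
 *
 *	static void bfqg_stats_mark_waiting(struct bfqg_stats *stats)
 *	{ stats->flags |= (1 << BFQG_stats_waiting); }
 *	static void bfqg_stats_clear_waiting(struct bfqg_stats *stats)
 *	{ stats->flags &= ~(1 << BFQG_stats_waiting); }
 *	static int bfqg_stats_waiting(struct bfqg_stats *stats)
 *	{ return (stats->flags & (1 << BFQG_stats_waiting)) != 0; }
 */
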
/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			     now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			     now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq
	 * got a new request in the parent group and moved to this group
	 * while being added to the service tree. Just ignore the event
	 * and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				     now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		     blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

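/*
 * No running average is kept above: each call adds one sample (the
 * current number of queued requests) to avg_queue_size_sum and bumps
 * avg_queue_size_samples by one; bfqg_prfill_avg_queue_size() divides
 * the two only when bfq.avg_queue_size is read.
 */
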
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (bfqq != bfqg->bfqd->in_service_queue)
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, opf,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, opf,
				io_start_time_ns - start_time_ns);
}

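/*
 * The two deltas above split a request's life at its dispatch time:
 * wait_time spans insertion (start_time_ns) to dispatch
 * (io_start_time_ns), while service_time spans dispatch to
 * completion; samples where the clock appears to run backwards are
 * simply dropped.
 */
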
#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing one to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	refcount_inc(&bfqg->ref);
}

static void bfqg_put(struct bfq_group *bfqg)
{
	if (refcount_dec_and_test(&bfqg->ref))
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

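/*
 * bfqg thus carries a private refcount (bfqg->ref) on top of the
 * blkg's own reference: a bfq_queue may outlive the blkg that created
 * its group, so the bfqg memory must only be freed once no bfq_queue
 * points at it (see the closing paragraph of the comment in
 * bfq_bic_update_cgroup).
 */
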
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		goto error;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp))
		goto error;
#endif

	return 0;

error:
	bfqg_stats_exit(stats);
	return -ENOMEM;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(struct gendisk *disk,
					     struct blkcg *blkcg, gfp_t gfp)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, disk->node_id);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	refcount_set(&bfqg->ref, 1);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	entity->last_bfqq_created = NULL;

	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->num_queues_with_pending_reqs = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	struct bfq_group *parent;
	struct bfq_entity *entity;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct bfq_group *bfqg;

	while (blkg) {
		if (!blkg->online) {
			blkg = blkg->parent;
			continue;
		}
		bfqg = blkg_to_bfqg(blkg);
		if (bfqg->pd.online) {
			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
			return bfqg;
		}
		blkg = blkg->parent;
	}
	bio_associate_blkg_from_css(bio,
				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
	return bfqd->root_group;
}

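/*
 * The walk above climbs to the nearest ancestor whose blkg and policy
 * data are both still online, so I/O issued from a dying cgroup is
 * charged to its closest live ancestor (ultimately the root group)
 * rather than being accounted against a half-torn-down group.
 */
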
/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_group *old_parent = bfqq_group(bfqq);
	bool has_pending_reqs = false;

	/*
	 * There is no point in moving bfqq to the same group, which can
	 * happen when the root group is offlined.
	 */
	if (old_parent == bfqg)
		return;

	/*
	 * oom_bfqq is not allowed to move; it holds a reference to
	 * root_group until elevator exit.
	 */
	if (bfqq == &bfqd->oom_bfqq)
		return;
	/*
	 * Get extra reference to prevent bfqq from being freed in
	 * next possible expire or deactivate.
	 */
	bfqq->ref++;

	if (entity->in_groups_with_pending_reqs) {
		has_pending_reqs = true;
		bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
	}

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st_or_in_serv)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(old_parent);

	if (entity->parent &&
	    entity->parent->last_bfqq_created == bfqq)
		entity->parent->last_bfqq_created = NULL;
	else if (bfqd->last_bfqq_created == bfqq)
		bfqd->last_bfqq_created = NULL;

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (has_pending_reqs)
		bfq_add_bfqq_in_groups_with_pending_reqs(bfqq);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
		bfq_schedule_dispatch(bfqd);
	/* release extra ref taken above, bfqq may happen to be freed now */
	bfq_put_queue(bfqq);
}

static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
			       struct bfq_queue *sync_bfqq,
			       struct bfq_io_cq *bic,
			       struct bfq_group *bfqg,
			       unsigned int act_idx)
{
	struct bfq_queue *bfqq;

	if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
		/* We are the only user of this bfqq, just move it */
		if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
		return;
	}

	/*
	 * The queue was merged with a different queue. Check
	 * that the merge chain still belongs to the same
	 * cgroup.
	 */
	for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
		if (bfqq->entity.sched_data != &bfqg->sched_data)
			break;
	if (bfqq) {
		/*
		 * Some queue changed cgroup so the merge is not valid
		 * anymore. We cannot easily just cancel the merge (by
		 * clearing new_bfqq) as there may be other processes
		 * using this queue and holding refs to all queues
		 * below sync_bfqq->new_bfqq. Similarly if the merge
		 * already happened, we need to detach from bfqq now
		 * so that we cannot merge a bio into a request from
		 * the old cgroup.
		 */
		bfq_put_cooperator(sync_bfqq);
		bic_set_bfqq(bic, NULL, true, act_idx);
		bfq_release_process_ref(bfqd, sync_bfqq);
	}
}

/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held, which makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 */
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
				    struct bfq_io_cq *bic,
				    struct bfq_group *bfqg)
{
	unsigned int act_idx;

	for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
		struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
		struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);

		if (async_bfqq &&
		    async_bfqq->entity.sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, false, act_idx);
			bfq_release_process_ref(bfqd, async_bfqq);
		}

		if (sync_bfqq)
			bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
	}
}

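/*
 * Note the asymmetry above: async queues are shared within a group
 * rather than owned by the process, so the bic simply drops its
 * reference and a later request will look up the new group's async
 * queue; sync queues instead are moved (or detached from a merge
 * chain that now spans cgroups) explicitly.
 */
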
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
	uint64_t serial_nr;

	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		return;

	/*
	 * New cgroup for this process. Make sure it is linked to bfq internal
	 * cgroup hierarchy.
	 */
	bfq_link_bfqg(bfqd, bfqg);
	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{
	struct bfq_queue *bfqq;
	struct bfq_entity *child_entity = entity;

	while (child_entity->my_sched_data) { /* leaf not reached yet */
		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
		struct bfq_service_tree *child_st = child_sd->service_tree +
			ioprio_class;
		struct rb_root *child_active = &child_st->active;

		child_entity = bfq_entity_of(rb_first(child_active));

		if (!child_entity)
			child_entity = child_sd->in_service_entity;
	}

	bfqq = bfq_entity_to_bfqq(child_entity);
	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_queues(bfqd, bfqg, st, i);

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited tasks because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now. In addition, even
		 * bfq_reparent_active_queues() may happen to add some
		 * entities to the idle tree. It happens if, in some
		 * of the calls to bfq_bfqq_move() performed by
		 * bfq_reparent_active_queues(), the queue to move is
		 * empty and gets expired.
		 */
		bfq_flush_idle_tree(st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}

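/*
 * Illustrative usage (the "io." prefix applies on the default cgroup
 * hierarchy; legacy hierarchies expose a separate bfq.weight_device
 * file instead):
 *
 *	echo 200 > io.bfq.weight		# default weight
 *	echo "8:16 300" > io.bfq.weight		# weight for device 8:16
 *	echo "8:16 default" > io.bfq.weight	# revert device 8:16
 *
 * The first form is handled directly above; the other two fall
 * through to bfq_io_set_device_weight(), where blkg_conf_prep()
 * resolves the MAJ:MIN prefix to a blkg.
 */
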
static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
			offsetof(struct bfq_group, stats.bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

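/*
 * blkcg_activate_policy() allocates and initializes policy data (via
 * the bfq_pd_alloc()/bfq_pd_init() hooks above) for every blkg that
 * already exists on the disk, so by the time it returns the root
 * blkg's policy data can serve as the scheduler's root group.
 */
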
struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

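/*
 * Two kinds of policy data are registered above: the cpd_* hooks
 * manage one bfq_group_data per blkcg (holding the default weight),
 * while the pd_* hooks manage one bfq_group per (blkcg, request
 * queue) pair.
 */
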
struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_serviced",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
	{ }	/* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

void bfqg_and_blkg_put(struct bfq_group *bfqg) {}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */