// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_ets.c		Enhanced Transmission Selection scheduler
 *
 * Description
 * -----------
 *
 * The Enhanced Transmission Selection scheduler is a classful queuing
 * discipline that merges the functionality of the PRIO and DRR qdiscs in one
 * scheduler. ETS makes it easy to configure a set of strict and
 * bandwidth-sharing bands to implement the transmission selection described
 * in 802.1Qaz.
 *
 * Although ETS is technically classful, it's not possible to add and remove
 * classes at will. Instead, one specifies the number of classes, how many are
 * PRIO-like and how many DRR-like, and the quanta for the latter.
 *
 * Algorithm
 * ---------
 *
 * The strict classes, if any, are tried for traffic first: first band 0, if it
 * has no traffic then band 1, etc.
 *
 * When there is no traffic in any of the strict queues, the bandwidth-sharing
 * ones are tried next. Each band is assigned a deficit counter, initialized to
 * "quantum" of that band. ETS maintains a list of active bandwidth-sharing
 * bands whose qdiscs are non-empty. A packet is dequeued from the band at the
 * head of the list if the packet size is less than or equal to the deficit
 * counter. If the counter is too small, it is increased by "quantum" and the
 * scheduler moves on to the next band in the active list.
 */
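
/*
 * For illustration, a worked round of the DRR part: with two bandwidth-sharing
 * bands A and B whose quanta are 3000 and 1500 bytes, a 2000-byte packet at
 * the head of band A (deficit 3000) is sent, leaving a deficit of 1000. A
 * second 2000-byte packet no longer fits, so A's deficit grows by its quantum
 * to 3000 and the scheduler moves on to band B. Over time, the two bands share
 * bandwidth roughly 2:1, in proportion to their quanta.
 *
 * An example configuration with two strict bands followed by the 3000/1500
 * split above (a sketch; the authoritative iproute2 syntax is documented in
 * tc-ets(8)):
 *
 *	tc qdisc add dev eth0 root handle 1: \
 *		ets bands 4 strict 2 quanta 3000 1500
 */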

#include <linux/module.h>
#include <net/gen_stats.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct ets_class {
	struct list_head alist; /* In struct ets_sched.active. */
	struct Qdisc *qdisc;
	u32 quantum;
	u32 deficit;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
};

struct ets_sched {
	struct list_head active;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned int nbands;
	unsigned int nstrict;
	u8 prio2band[TC_PRIO_MAX + 1];
	struct ets_class classes[TCQ_ETS_MAX_BANDS];
};

static const struct nla_policy ets_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_NBANDS] = { .type = NLA_U8 },
	[TCA_ETS_NSTRICT] = { .type = NLA_U8 },
	[TCA_ETS_QUANTA] = { .type = NLA_NESTED },
	[TCA_ETS_PRIOMAP] = { .type = NLA_NESTED },
};

static const struct nla_policy ets_priomap_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_PRIOMAP_BAND] = { .type = NLA_U8 },
};

static const struct nla_policy ets_quanta_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};

static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};

static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
			     unsigned int *quantum,
			     struct netlink_ext_ack *extack)
{
	*quantum = nla_get_u32(attr);
	if (!*quantum) {
		NL_SET_ERR_MSG(extack, "ETS quantum cannot be zero");
		return -EINVAL;
	}
	return 0;
}

static struct ets_class *
ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
{
	struct ets_sched *q = qdisc_priv(sch);

	return &q->classes[arg - 1];
}

static u32 ets_class_id(struct Qdisc *sch, const struct ets_class *cl)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band = cl - q->classes;

	return TC_H_MAKE(sch->handle, band + 1);
}

static void ets_offload_change(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct ets_sched *q = qdisc_priv(sch);
	struct tc_ets_qopt_offload qopt;
	unsigned int w_psum_prev = 0;
	unsigned int q_psum = 0;
	unsigned int q_sum = 0;
	unsigned int quantum;
	unsigned int w_psum;
	unsigned int weight;
	unsigned int i;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.replace_params.bands = q->nbands;
	qopt.replace_params.qstats = &sch->qstats;
	memcpy(&qopt.replace_params.priomap,
	       q->prio2band, sizeof(q->prio2band));

	for (i = 0; i < q->nbands; i++)
		q_sum += q->classes[i].quantum;

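	/* Derive each band's percentage weight from consecutive prefix sums
	 * of the quanta, so that integer rounding errors do not accumulate:
	 * whenever at least one bandwidth-sharing band is configured, the
	 * per-band weights telescope to a total of exactly 100. Strict bands
	 * have a quantum of zero and thus get a weight of zero.
	 */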
	for (i = 0; i < q->nbands; i++) {
		quantum = q->classes[i].quantum;
		q_psum += quantum;
		w_psum = quantum ? q_psum * 100 / q_sum : 0;
		weight = w_psum - w_psum_prev;
		w_psum_prev = w_psum;

		qopt.replace_params.quanta[i] = quantum;
		qopt.replace_params.weights[i] = weight;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}

static void ets_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}

static void ets_offload_graft(struct Qdisc *sch, struct Qdisc *new,
			      struct Qdisc *old, unsigned long arg,
			      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_GRAFT;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.graft_params.band = arg - 1;
	qopt.graft_params.child_handle = new->handle;

	qdisc_offload_graft_helper(dev, sch, new, old, TC_SETUP_QDISC_ETS,
				   &qopt, extack);
}

static int ets_offload_dump(struct Qdisc *sch)
{
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_ETS, &qopt);
}

static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
{
	unsigned int band = cl - q->classes;

	return band < q->nstrict;
}

static int ets_class_change(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, *arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int quantum;
	int err;

	/* Classes can be added and removed only through Qdisc_ops.change
	 * interface.
	 */
	if (!cl) {
		NL_SET_ERR_MSG(extack, "Fine-grained class addition and removal is not supported");
		return -EOPNOTSUPP;
	}

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_class_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_QUANTA_BAND])
		/* Nothing to configure. */
		return 0;

	if (ets_class_is_strict(q, cl)) {
		NL_SET_ERR_MSG(extack, "Strict bands do not have a configurable quantum");
		return -EINVAL;
	}

	err = ets_quantum_parse(sch, tb[TCA_ETS_QUANTA_BAND], &quantum,
				extack);
	if (err)
		return err;

	sch_tree_lock(sch);
	cl->quantum = quantum;
	sch_tree_unlock(sch);

	ets_offload_change(sch);
	return 0;
}

static int ets_class_graft(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					ets_class_id(sch, cl), NULL);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	ets_offload_graft(sch, new, *old, arg, extack);
	return 0;
}

static struct Qdisc *ets_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	return cl->qdisc;
}

static unsigned long ets_class_find(struct Qdisc *sch, u32 classid)
{
	unsigned long band = TC_H_MIN(classid);
	struct ets_sched *q = qdisc_priv(sch);

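	/* band is unsigned, so for a classid with minor number 0 the
	 * subtraction wraps around and the single comparison rejects it
	 * together with the out-of-range bands.
	 */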
	if (band - 1 >= q->nbands)
		return 0;
	return band;
}

static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);

	/* We get notified about zero-length child Qdiscs as well if they are
	 * offloaded. Those aren't on the active list though, so don't attempt
	 * to remove them.
	 */
	if (!ets_class_is_strict(q, cl) && sch->q.qlen)
		list_del(&cl->alist);
}

static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *nest;

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = ets_class_id(sch, cl);
	tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;
	if (!ets_class_is_strict(q, cl)) {
		if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND, cl->quantum))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int ets_class_dump_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct Qdisc *cl_q = cl->qdisc;

	if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}

static void ets_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct ets_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->nbands; i++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_block *
ets_qdisc_tcf_block(struct Qdisc *sch, unsigned long cl,
		    struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "ETS classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long ets_qdisc_bind_tcf(struct Qdisc *sch, unsigned long parent,
					u32 classid)
{
	return ets_class_find(sch, classid);
}

static void ets_qdisc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
}

static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct ets_sched *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, NULL, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return &q->classes[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
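	/* A classification result (or skb->priority) outside the configured
	 * range falls back to the band that priority 0 maps to.
	 */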
	band = TC_H_MIN(band) - 1;
	if (band >= q->nbands)
		return &q->classes[q->prio2band[0]];
	return &q->classes[band];
}

static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	int err = 0;
	bool first;

	cl = ets_classify(skb, sch, &err);
	if (!cl) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

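	/* A bandwidth-sharing band becomes active when its first packet
	 * arrives, starting with a full quantum of deficit.
	 */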
	if (first && !ets_class_is_strict(q, cl)) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

static struct sk_buff *
ets_qdisc_dequeue_skb(struct Qdisc *sch, struct sk_buff *skb)
{
	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	return skb;
}

static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	struct sk_buff *skb;
	unsigned int band;
	unsigned int len;

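	/* Strict bands are polled in priority order on every dequeue; the
	 * DRR active list is only consulted once all of them are empty.
	 */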
	while (1) {
		for (band = 0; band < q->nstrict; band++) {
			cl = &q->classes[band];
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (skb)
				return ets_qdisc_dequeue_skb(sch, skb);
		}

		if (list_empty(&q->active))
			goto out;

		cl = list_first_entry(&q->active, struct ets_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (!skb) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(!skb))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);
			return ets_qdisc_dequeue_skb(sch, skb);
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

static int ets_qdisc_priomap_parse(struct nlattr *priomap_attr,
				   unsigned int nbands, u8 *priomap,
				   struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int prio = 0;
	u8 band;
	int rem;
	int err;

	err = __nla_validate_nested(priomap_attr, TCA_ETS_MAX,
				    ets_priomap_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err)
		return err;

	nla_for_each_nested(attr, priomap_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_PRIOMAP_BAND:
			if (prio > TC_PRIO_MAX) {
				NL_SET_ERR_MSG_MOD(extack, "Too many priorities in ETS priomap");
				return -EINVAL;
			}
			band = nla_get_u8(attr);
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "Invalid band number in ETS priomap");
				return -EINVAL;
			}
			priomap[prio++] = band;
			break;
		default:
			WARN_ON_ONCE(1); /* Validate should have caught this. */
			return -EINVAL;
		}
	}

	return 0;
}

static int ets_qdisc_quanta_parse(struct Qdisc *sch, struct nlattr *quanta_attr,
				  unsigned int nbands, unsigned int nstrict,
				  unsigned int *quanta,
				  struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int band = nstrict;
	int rem;
	int err;

	err = __nla_validate_nested(quanta_attr, TCA_ETS_MAX,
				    ets_quanta_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err < 0)
		return err;

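	/* Quanta apply to the bandwidth-sharing bands only, so the first
	 * supplied value belongs to band nstrict.
	 */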
	nla_for_each_nested(attr, quanta_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_QUANTA_BAND:
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "ETS quanta has more values than bands");
				return -EINVAL;
			}
			err = ets_quantum_parse(sch, attr, &quanta[band++],
						extack);
			if (err)
				return err;
			break;
		default:
			WARN_ON_ONCE(1); /* Validate should have caught this. */
			return -EINVAL;
		}
	}

	return 0;
}

static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
			    struct netlink_ext_ack *extack)
{
	unsigned int quanta[TCQ_ETS_MAX_BANDS] = {0};
	struct Qdisc *queues[TCQ_ETS_MAX_BANDS];
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int oldbands = q->nbands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int nstrict = 0;
	unsigned int nbands;
	unsigned int i;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_NBANDS]) {
		NL_SET_ERR_MSG_MOD(extack, "Number of bands is a required argument");
		return -EINVAL;
	}
	nbands = nla_get_u8(tb[TCA_ETS_NBANDS]);
	if (nbands < 1 || nbands > TCQ_ETS_MAX_BANDS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of bands");
		return -EINVAL;
	}
	/* Unless overridden, traffic goes to the last band. */
	memset(priomap, nbands - 1, sizeof(priomap));

	if (tb[TCA_ETS_NSTRICT]) {
		nstrict = nla_get_u8(tb[TCA_ETS_NSTRICT]);
		if (nstrict > nbands) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid number of strict bands");
			return -EINVAL;
		}
	}

	if (tb[TCA_ETS_PRIOMAP]) {
		err = ets_qdisc_priomap_parse(tb[TCA_ETS_PRIOMAP],
					      nbands, priomap, extack);
		if (err)
			return err;
	}

	if (tb[TCA_ETS_QUANTA]) {
		err = ets_qdisc_quanta_parse(sch, tb[TCA_ETS_QUANTA],
					     nbands, nstrict, quanta, extack);
		if (err)
			return err;
	}
	/* If there are more bands than strict + quanta provided, the remaining
	 * ones are ETS with quantum of MTU. Initialize the missing values here.
	 */
	for (i = nstrict; i < nbands; i++) {
		if (!quanta[i])
			quanta[i] = psched_mtu(qdisc_dev(sch));
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < nbands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      ets_class_id(sch, &q->classes[i]),
					      extack);
		if (!queues[i]) {
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

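	/* Everything that can fail has been checked or allocated above, so
	 * the switch to the new configuration can be committed under the
	 * tree lock without leaving the qdisc in a half-updated state.
	 */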
	sch_tree_lock(sch);

	q->nbands = nbands;
	for (i = nstrict; i < q->nstrict; i++) {
		if (q->classes[i].qdisc->q.qlen) {
			list_add_tail(&q->classes[i].alist, &q->active);
			q->classes[i].deficit = quanta[i];
		}
	}
	for (i = q->nbands; i < oldbands; i++) {
		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
			list_del(&q->classes[i].alist);
		qdisc_tree_flush_backlog(q->classes[i].qdisc);
	}
	q->nstrict = nstrict;
	memcpy(q->prio2band, priomap, sizeof(priomap));

	for (i = 0; i < q->nbands; i++)
		q->classes[i].quantum = quanta[i];

	for (i = oldbands; i < q->nbands; i++) {
		q->classes[i].qdisc = queues[i];
		if (q->classes[i].qdisc != &noop_qdisc)
			qdisc_hash_add(q->classes[i].qdisc, true);
	}

	sch_tree_unlock(sch);

	ets_offload_change(sch);
	for (i = q->nbands; i < oldbands; i++) {
		qdisc_put(q->classes[i].qdisc);
		q->classes[i].qdisc = NULL;
		q->classes[i].quantum = 0;
		q->classes[i].deficit = 0;
		gnet_stats_basic_sync_init(&q->classes[i].bstats);
		memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
	}
	return 0;
}

static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);
	int err, i;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	INIT_LIST_HEAD(&q->active);
	for (i = 0; i < TCQ_ETS_MAX_BANDS; i++)
		INIT_LIST_HEAD(&q->classes[i].alist);

	return ets_qdisc_change(sch, opt, extack);
}

static void ets_qdisc_reset(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	for (band = q->nstrict; band < q->nbands; band++) {
		if (q->classes[band].qdisc->q.qlen)
			list_del(&q->classes[band].alist);
	}
	for (band = 0; band < q->nbands; band++)
		qdisc_reset(q->classes[band].qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void ets_qdisc_destroy(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	ets_offload_destroy(sch);
	tcf_block_put(q->block);
	for (band = 0; band < q->nbands; band++)
		qdisc_put(q->classes[band].qdisc);
}

static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct nlattr *nest;
	int band;
	int prio;
	int err;

	err = ets_offload_dump(sch);
	if (err)
		return err;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_err;

	if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
		goto nla_err;

	if (q->nstrict &&
	    nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
		goto nla_err;

	if (q->nbands > q->nstrict) {
		nest = nla_nest_start(skb, TCA_ETS_QUANTA);
		if (!nest)
			goto nla_err;

		for (band = q->nstrict; band < q->nbands; band++) {
			if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
					q->classes[band].quantum))
				goto nla_err;
		}

		nla_nest_end(skb, nest);
	}

	nest = nla_nest_start(skb, TCA_ETS_PRIOMAP);
	if (!nest)
		goto nla_err;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
			goto nla_err;
	}

	nla_nest_end(skb, nest);

	return nla_nest_end(skb, opts);

nla_err:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static const struct Qdisc_class_ops ets_class_ops = {
	.change		= ets_class_change,
	.graft		= ets_class_graft,
	.leaf		= ets_class_leaf,
	.find		= ets_class_find,
	.qlen_notify	= ets_class_qlen_notify,
	.dump		= ets_class_dump,
	.dump_stats	= ets_class_dump_stats,
	.walk		= ets_qdisc_walk,
	.tcf_block	= ets_qdisc_tcf_block,
	.bind_tcf	= ets_qdisc_bind_tcf,
	.unbind_tcf	= ets_qdisc_unbind_tcf,
};

static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
	.cl_ops		= &ets_class_ops,
	.id		= "ets",
	.priv_size	= sizeof(struct ets_sched),
	.enqueue	= ets_qdisc_enqueue,
	.dequeue	= ets_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.change		= ets_qdisc_change,
	.init		= ets_qdisc_init,
	.reset		= ets_qdisc_reset,
	.destroy	= ets_qdisc_destroy,
	.dump		= ets_qdisc_dump,
	.owner		= THIS_MODULE,
};

static int __init ets_init(void)
{
	return register_qdisc(&ets_qdisc_ops);
}

static void __exit ets_exit(void)
{
	unregister_qdisc(&ets_qdisc_ops);
}

module_init(ets_init);
module_exit(ets_exit);
MODULE_LICENSE("GPL");