/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/*	Class-Based Queueing (CBQ) algorithm.
	=======================================

	Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
		 Management Models for Packet Networks",
		 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

		 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

		 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
		 Parameters", 1996

		 [4] Sally Floyd and Michael Speer, "Experimental Results
		 for Class-Based Queueing", 1998, not published.

	-----------------------------------------------------------------------

	Algorithm skeleton was taken from NS simulator cbq.cc.
	If someone wants to check this code against the LBL version,
	he should take into account that ONLY the skeleton was borrowed,
	the implementation is different. Particularly:

	--- The WRR algorithm is different. Our version looks more
	reasonable (I hope) and works when quanta are allowed to be
	less than MTU, which is always the case when real time classes
	have small rates. Note that the statement of [3] is
	incomplete, delay may actually be estimated even if class
	per-round allotment is less than MTU. Namely, if per-round
	allotment is W*r_i, and r_1+...+r_k = r < 1

	delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

	In the worst case we have IntServ estimate with D = W*r+k*MTU
	and C = MTU*r. The proof (if correct at all) is trivial.


	--- It seems that cbq-2.0 is not very accurate. At least, I cannot
	interpret some places, which look like wrong translations
	from NS. Anyone is advised to find these differences
	and explain to me, why I am wrong 8).

	--- Linux has no EOI event, so that we cannot estimate true class
	idle time. Workaround is to consider the next dequeue event
	as sign that previous packet is finished. This is wrong because of
	internal device queueing, but on a permanently loaded link it is true.
	Moreover, combined with clock integrator, this scheme looks
	very close to an ideal solution.  */
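
/* Illustrative configuration sketch (assumed iproute2 syntax; the exact
 * option set depends on the tc version): hierarchies served by this
 * scheduler are built from user space, e.g.
 *
 *	tc qdisc add dev eth0 root handle 1: cbq bandwidth 100Mbit \
 *		avpkt 1000 cell 8
 *	tc class add dev eth0 parent 1: classid 1:1 cbq bandwidth 100Mbit \
 *		rate 60Mbit allot 1514 prio 5 avpkt 1000 bounded
 *	tc class add dev eth0 parent 1: classid 1:2 cbq bandwidth 100Mbit \
 *		rate 40Mbit allot 1514 prio 5 avpkt 1000 borrow
 *
 * "rate" feeds R_tab, "allot" sets the per-round WRR allotment, and
 * "bounded"/"borrow" decide whether ->borrow in struct cbq_class below
 * is NULL or points to the parent.
 */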

struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */
	unsigned char		ovl_strategy;
#ifdef CONFIG_NET_CLS_ACT
	unsigned char		police;
#endif

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* Overlimit strategy parameters */
	void			(*overlimit)(struct cbq_class *cl);
	psched_tdiff_t		penalty;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	psched_time_t		now_rt;		/* Cached real time */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};


#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
 * so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_result res;

	/*
	 *  Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		/*
		 * Step 2+n. Apply classifier.
		 */
		if (!head->filter_list ||
		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL || cl->level >= head->level)
				goto fallback;
		}

#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 * apply agency specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}

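/* Example for Step 1 above (assumed handle/classid values, for
 * illustration only): if sch->handle is 1:0 and an application sets
 * skb->priority to the classid of 1:2, e.g. via the SO_PRIORITY socket
 * option, then TC_H_MAJ(prio ^ sch->handle) == 0 and the packet is
 * mapped straight to class 1:2 without walking any filter chain.
 */
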
/*
 * A packet has just been enqueued on the empty class.
 * cbq_activate_class adds it to the tail of active class list
 * of its priority band.
 */

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
		psched_time_t now;
		psched_tdiff_t incr;

		now = psched_get_time();
		incr = now - q->now_rt;
		now = q->now + incr;

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}

#ifdef CONFIG_NET_CLS_ACT
	cl->q->__parent = sch;
#endif
	ret = qdisc_enqueue(skb, cl->q);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}

/* Overlimit actions */

/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */

static void cbq_ovl_classic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug in this
		 * place, apparently they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
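
/* Worked example for the penalty computed above (illustrative numbers,
 * not taken from any configuration): with offtime = 10000 ticks,
 * avgidle = -2000 and ewma_log = 5, the avgidle correction is
 * 2000 - (2000 >> 5) = 1938, so the class sleeps for
 * (undertime - now) + 10000 - 1938 ticks before it may send again.
 */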

/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
 * they go overlimit
 */

static void cbq_ovl_rclassic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this = cl;

	do {
		if (cl->level > q->toplevel) {
			cl = NULL;
			break;
		}
	} while ((cl = cl->borrow) != NULL);

	if (cl == NULL)
		cl = this;
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */

static void cbq_ovl_delay(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(cl->qdisc)->state))
		return;

	if (!cl->delayed) {
		psched_time_t sched = q->now;
		ktime_t expires;

		delay += cl->offtime;
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		cl->undertime = q->now + delay;

		if (delay > 0) {
			sched += delay + cl->penalty;
			cl->penalized = sched;
			cl->cpriority = TC_CBQ_MAXPRIO;
			q->pmask |= (1<<TC_CBQ_MAXPRIO);

			expires = ktime_set(0, 0);
			expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
			if (hrtimer_try_to_cancel(&q->delay_timer) &&
			    ktime_to_ns(ktime_sub(
					hrtimer_get_expires(&q->delay_timer),
					expires)) > 0)
				hrtimer_set_expires(&q->delay_timer, expires);
			hrtimer_restart(&q->delay_timer);
			cl->delayed = 1;
			cl->xstats.overactions++;
			return;
		}
		delay = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;
}

/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */

static void cbq_ovl_lowprio(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	cl->penalized = q->now + cl->penalty;

	if (cl->cpriority != cl->priority2) {
		cl->cpriority = cl->priority2;
		q->pmask |= (1<<cl->cpriority);
		cl->xstats.overactions++;
	}
	cbq_ovl_classic(cl);
}

/* TC_CBQ_OVL_DROP: penalize class by dropping */

static void cbq_ovl_drop(struct cbq_class *cl)
{
	if (cl->q->ops->drop)
		if (cl->q->ops->drop(cl->q))
			cl->qdisc->q.qlen--;
	cl->xstats.overactions++;
	cbq_ovl_classic(cl);
}

static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}

static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
	}

	qdisc_unthrottled(sch);
	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
	struct Qdisc *sch = child->__parent;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = q->rx_class;

	q->rx_class = NULL;

	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
		int ret;

		cbq_mark_toplevel(q, cl);

		q->rx_class = cl;
		cl->q->__parent = sch;

		ret = qdisc_enqueue(skb, cl->q);
		if (ret == NET_XMIT_SUCCESS) {
			sch->q.qlen++;
			if (!cl->next_alive)
				cbq_activate_class(cl);
			return 0;
		}
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return 0;
	}

	sch->qstats.drops++;
	return -1;
}
#endif

/*
 * This is a mission-critical procedure.
 *
 * We "regenerate" toplevel cutoff, if transmitting class
 * has backlog and it is not regulated. It is not part of
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}

static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;

	q->tx_class = NULL;

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 *	idle = (now - last) - last_pktlen/rate
		 */

		idle = q->now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

			/* true_avgidle := (1-W)*true_avgidle + W*idle,
			 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
			 * cl->avgidle == true_avgidle/W,
			 * hence:
			 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur, when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime virtual clock,
			 * necessary to complete transmitted packet.
			 * (len/phys_bandwidth has been already passed
			 * to the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = q->now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		cl->last = q->now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
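
/* Worked EWMA example (illustrative numbers): with ewma_log = 5
 * (W = 1/32), scaled avgidle = 3200 and a measured idle of 100 ticks,
 * the update above gives 3200 + 100 - (3200 >> 5) = 3200, i.e. the
 * class sits exactly at its long-term idle average; smaller measured
 * idle values pull avgidle down toward the overlimit branch.
 */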

static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Now the overlimit
		 * action is generated for not bounded classes
		 * only if link is completely congested.
		 * Though it is in agreement with the ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			this_cl->overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0
			 * f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;
	psched_tdiff_t incr;

	now = psched_get_time();
	incr = now - q->now_rt;

	if (q->tx_class) {
		psched_tdiff_t incr2;
		/* Time integrator. We calculate EOS time
		 * by adding expected packet transmission time.
		 * If real time is greater, we warp artificial clock,
		 * so that:
		 *
		 * cbq_time = max(real_time, work);
		 */
		incr2 = L2T(&q->link, q->tx_len);
		q->now += incr2;
		cbq_update(q);
		if ((incr -= incr2) < 0)
			incr = 0;
	}
	q->now += incr;
	q->now_rt = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * It is possible, if:
		 *
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy, peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for toplevel algorithm.
		 *
		 * Our version is better, but slower, because it requires
		 * two passes, but it is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		sch->qstats.overlimits++;
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}

/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	struct hlist_node *n;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 || cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					   cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
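
/* Worked example of the normalization above (illustrative numbers):
 * two classes at one prio with weights 2 and 1 (so quanta[prio] = 3),
 * allot 1514 each and nclasses[prio] = 2 get quanta
 *	2*1514*2/3 = 2018  and  1*1514*2/3 = 1009
 * so per-round service stays proportional to weight while the quanta
 * sum to roughly nclasses * allot.
 */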

static void cbq_sync_defmap(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *split = cl->split;
	unsigned int h;
	int i;

	if (split == NULL)
		return;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
			split->defaults[i] = NULL;
	}

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		int level = split->level;

		if (split->defaults[i])
			continue;

		for (h = 0; h < q->clhash.hashsize; h++) {
			struct hlist_node *n;
			struct cbq_class *c;

			hlist_for_each_entry(c, n, &q->clhash.hash[h],
					     common.hnode) {
				if (c->split == split && c->level < level &&
				    c->defmap & (1<<i)) {
					split->defaults[i] = c;
					level = c->level;
				}
			}
		}
	}
}

static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}
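
/* Illustrative use of split/defmap (assumed tc syntax; details vary by
 * iproute2 version):
 *
 *	tc class change dev eth0 classid 1:20 cbq ... split 1:2 defmap c0
 *
 * asks cbq_change_defmap() to make 1:20 the default destination under
 * split node 1:2 for logical priorities 6 and 7 (mask 0xc0), which
 * cbq_sync_defmap() then propagates into split->defaults[].
 */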

static void cbq_unlink_class(struct cbq_class *this)
{
	struct cbq_class *cl, **clp;
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);

	qdisc_class_hash_remove(&q->clhash, &this->common);

	if (this->tparent) {
		clp = &this->sibling;
		cl = *clp;
		do {
			if (cl == this) {
				*clp = cl->sibling;
				break;
			}
			clp = &cl->sibling;
		} while ((cl = *clp) != this->sibling);

		if (this->tparent->children == this) {
			this->tparent->children = this->sibling;
			if (this->sibling == this)
				this->tparent->children = NULL;
		}
	} else {
		WARN_ON(this->sibling != this);
	}
}

static void cbq_link_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	struct cbq_class *parent = this->tparent;

	this->sibling = this;
	qdisc_class_hash_insert(&q->clhash, &this->common);

	if (parent == NULL)
		return;

	if (parent->children == NULL) {
		parent->children = this;
	} else {
		this->sibling = parent->children->sibling;
		parent->children->sibling = this;
	}
}

static unsigned int cbq_drop(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl, *cl_head;
	int prio;
	unsigned int len;

	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
		cl_head = q->active[prio];
		if (!cl_head)
			continue;

		cl = cl_head;
		do {
			if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
				sch->q.qlen--;
				if (!cl->q->q.qlen)
					cbq_deactivate_class(cl);
				return len;
			}
		} while ((cl = cl->next_alive) != cl_head);
	}
	return 0;
}

static void
cbq_reset(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	struct hlist_node *n;
	int prio;
	unsigned int h;

	q->activemask = 0;
	q->pmask = 0;
	q->tx_class = NULL;
	q->tx_borrowed = NULL;
	qdisc_watchdog_cancel(&q->watchdog);
	hrtimer_cancel(&q->delay_timer);
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
		q->active[prio] = NULL;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
			qdisc_reset(cl->q);

			cl->next_alive = NULL;
			cl->undertime = PSCHED_PASTPERFECT;
			cl->avgidle = cl->maxidle;
			cl->deficit = cl->quantum;
			cl->cpriority = cl->priority;
		}
	}
	sch->q.qlen = 0;
}


static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
{
	if (lss->change & TCF_CBQ_LSS_FLAGS) {
		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
	}
	if (lss->change & TCF_CBQ_LSS_EWMA)
		cl->ewma_log = lss->ewma_log;
	if (lss->change & TCF_CBQ_LSS_AVPKT)
		cl->avpkt = lss->avpkt;
	if (lss->change & TCF_CBQ_LSS_MINIDLE)
		cl->minidle = -(long)lss->minidle;
	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
		cl->maxidle = lss->maxidle;
		cl->avgidle = lss->maxidle;
	}
	if (lss->change & TCF_CBQ_LSS_OFFTIME)
		cl->offtime = lss->offtime;
	return 0;
}

static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]--;
	q->quanta[cl->priority] -= cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
{
	q->nclasses[cl->priority]++;
	q->quanta[cl->priority] += cl->weight;
	cbq_normalize_quanta(q, cl->priority);
}

static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);

	if (wrr->allot)
		cl->allot = wrr->allot;
	if (wrr->weight)
		cl->weight = wrr->weight;
	if (wrr->priority) {
		cl->priority = wrr->priority - 1;
		cl->cpriority = cl->priority;
		if (cl->priority >= cl->priority2)
			cl->priority2 = TC_CBQ_MAXPRIO - 1;
	}

	cbq_addprio(q, cl);
	return 0;
}

static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
{
	switch (ovl->strategy) {
	case TC_CBQ_OVL_CLASSIC:
		cl->overlimit = cbq_ovl_classic;
		break;
	case TC_CBQ_OVL_DELAY:
		cl->overlimit = cbq_ovl_delay;
		break;
	case TC_CBQ_OVL_LOWPRIO:
		if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
		    ovl->priority2 - 1 <= cl->priority)
			return -EINVAL;
		cl->priority2 = ovl->priority2 - 1;
		cl->overlimit = cbq_ovl_lowprio;
		break;
	case TC_CBQ_OVL_DROP:
		cl->overlimit = cbq_ovl_drop;
		break;
	case TC_CBQ_OVL_RCLASSIC:
		cl->overlimit = cbq_ovl_rclassic;
		break;
	default:
		return -EINVAL;
	}
	cl->penalty = ovl->penalty;
	return 0;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
	cl->police = p->police;

	if (cl->q->handle) {
		if (p->police == TC_POLICE_RECLASSIFY)
			cl->q->reshape_fail = cbq_reshape_fail;
		else
			cl->q->reshape_fail = NULL;
	}
	return 0;
}
#endif

static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
{
	cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
	return 0;
}

static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
	[TCA_CBQ_LSSOPT]	= { .len = sizeof(struct tc_cbq_lssopt) },
	[TCA_CBQ_WRROPT]	= { .len = sizeof(struct tc_cbq_wrropt) },
	[TCA_CBQ_FOPT]		= { .len = sizeof(struct tc_cbq_fopt) },
	[TCA_CBQ_OVL_STRATEGY]	= { .len = sizeof(struct tc_cbq_ovl) },
	[TCA_CBQ_RATE]		= { .len = sizeof(struct tc_ratespec) },
	[TCA_CBQ_RTAB]		= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_CBQ_POLICE]	= { .len = sizeof(struct tc_cbq_police) },
};

static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct tc_ratespec *r;
	int err;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
		return -EINVAL;

	r = nla_data(tb[TCA_CBQ_RATE]);

	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto put_rtab;

	q->link.refcnt = 1;
	q->link.sibling = &q->link;
	q->link.common.classid = sch->handle;
	q->link.qdisc = sch;
	q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      sch->handle);
	if (!q->link.q)
		q->link.q = &noop_qdisc;

	q->link.priority = TC_CBQ_MAXPRIO - 1;
	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
	q->link.overlimit = cbq_ovl_classic;
	q->link.allot = psched_mtu(qdisc_dev(sch));
	q->link.quantum = q->link.allot;
	q->link.weight = q->link.R_tab->rate.rate;

	q->link.ewma_log = TC_CBQ_DEF_EWMA;
	q->link.avpkt = q->link.allot/2;
	q->link.minidle = -0x7FFFFFFF;

	qdisc_watchdog_init(&q->watchdog, sch);
	hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	q->delay_timer.function = cbq_undelay;
	q->toplevel = TC_CBQ_MAXLEVEL;
	q->now = psched_get_time();
	q->now_rt = q->now;

	cbq_link_class(&q->link);

	if (tb[TCA_CBQ_LSSOPT])
		cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));

	cbq_addprio(q, &q->link);
	return 0;

put_rtab:
	qdisc_put_rtab(q->link.R_tab);
	return err;
}

static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);

	if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_lssopt opt;

	opt.flags = 0;
	if (cl->borrow == NULL)
		opt.flags |= TCF_CBQ_LSS_BOUNDED;
	if (cl->share == NULL)
		opt.flags |= TCF_CBQ_LSS_ISOLATED;
	opt.ewma_log = cl->ewma_log;
	opt.level = cl->level;
	opt.avpkt = cl->avpkt;
	opt.maxidle = cl->maxidle;
	opt.minidle = (u32)(-cl->minidle);
	opt.offtime = cl->offtime;
	opt.change = ~0;
	if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_wrropt opt;

	opt.flags = 0;
	opt.allot = cl->allot;
	opt.priority = cl->priority + 1;
	opt.cpriority = cl->cpriority + 1;
	opt.weight = cl->weight;
	if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_ovl opt;

	opt.strategy = cl->ovl_strategy;
	opt.priority2 = cl->priority2 + 1;
	opt.pad = 0;
	opt.penalty = cl->penalty;
	if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
		goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_fopt opt;

	if (cl->split || cl->defmap) {
		opt.split = cl->split ? cl->split->common.classid : 0;
		opt.defmap = cl->defmap;
		opt.defchange = ~0;
		if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

#ifdef CONFIG_NET_CLS_ACT
static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cbq_police opt;

	if (cl->police) {
		opt.police = cl->police;
		opt.__res1 = 0;
		opt.__res2 = 0;
		if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
			goto nla_put_failure;
	}
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
#endif

static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
	    cbq_dump_police(skb, cl) < 0 ||
#endif
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}

static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, &q->link) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	q->link.xstats.avgidle = q->link.avgidle;
	return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
}

static int
cbq_dump_class(struct Qdisc *sch, unsigned long arg,
	       struct sk_buff *skb, struct tcmsg *tcm)
{
	struct cbq_class *cl = (struct cbq_class *)arg;
	struct nlattr *nest;

	if (cl->tparent)
		tcm->tcm_parent = cl->tparent->common.classid;
	else
		tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	tcm->tcm_info = cl->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (cbq_dump_attr(skb, cl) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		     struct gnet_dump *d)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->qstats.qlen = cl->q->q.qlen;
	cl->xstats.avgidle = cl->avgidle;
	cl->xstats.undertime = 0;

	if (cl->undertime != PSCHED_PASTPERFECT)
		cl->xstats.undertime = cl->undertime - q->now;

	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, &cl->qstats) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			return -ENOBUFS;
	} else {
#ifdef CONFIG_NET_CLS_ACT
		if (cl->police == TC_POLICE_RECLASSIFY)
			new->reshape_fail = cbq_reshape_fail;
#endif
	}
	sch_tree_lock(sch);
	*old = cl->q;
	cl->q = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	return cl->q;
}

static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl->q->q.qlen == 0)
		cbq_deactivate_class(cl);
}

static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		cl->refcnt++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(sch);

	WARN_ON(cl->filters);

	tcf_destroy_chain(&cl->filter_list);
	qdisc_destroy(cl->q);
	qdisc_put_rtab(cl->R_tab);
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	if (cl != &q->link)
		kfree(cl);
}

static void cbq_destroy(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct hlist_node *n, *next;
	struct cbq_class *cl;
	unsigned int h;

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = NULL;
#endif
	/*
	 * Filters must be destroyed first because we don't destroy the
	 * classes from root to leafs which means that filters can still
	 * be bound to classes which have been destroyed already. --TGR '04
	 */
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
			tcf_destroy_chain(&cl->filter_list);
	}
	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
					  common.hnode)
			cbq_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static void cbq_put(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (--cl->refcnt == 0) {
#ifdef CONFIG_NET_CLS_ACT
		spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
		struct cbq_sched_data *q = qdisc_priv(sch);

		spin_lock_bh(root_lock);
		if (q->rx_class == cl)
			q->rx_class = NULL;
		spin_unlock_bh(root_lock);
#endif

		cbq_destroy_class(sch, cl);
	}
}

static int
cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
		 unsigned long *arg)
{
	int err;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_CBQ_MAX + 1];
	struct cbq_class *parent;
	struct qdisc_rate_table *rtab = NULL;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
	if (err < 0)
		return err;

	if (cl) {
		/* Check parent */
		if (parentid) {
			if (cl->tparent &&
			    cl->tparent->common.classid != parentid)
				return -EINVAL;
			if (!cl->tparent && parentid != TC_H_ROOT)
				return -EINVAL;
		}

		if (tb[TCA_CBQ_RATE]) {
			rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
					      tb[TCA_CBQ_RTAB]);
			if (rtab == NULL)
				return -EINVAL;
		}

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err) {
				if (rtab)
					qdisc_put_rtab(rtab);
				return err;
			}
		}

		/* Change class parameters */
		sch_tree_lock(sch);

		if (cl->next_alive != NULL)
			cbq_deactivate_class(cl);

		if (rtab) {
			qdisc_put_rtab(cl->R_tab);
			cl->R_tab = rtab;
		}

		if (tb[TCA_CBQ_LSSOPT])
			cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));

		if (tb[TCA_CBQ_WRROPT]) {
			cbq_rmprio(q, cl);
			cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
		}

		if (tb[TCA_CBQ_OVL_STRATEGY])
			cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));

#ifdef CONFIG_NET_CLS_ACT
		if (tb[TCA_CBQ_POLICE])
			cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif

		if (tb[TCA_CBQ_FOPT])
			cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));

		if (cl->q->q.qlen)
			cbq_activate_class(cl);

		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EINVAL;

	if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
	    tb[TCA_CBQ_LSSOPT] == NULL)
		return -EINVAL;

	rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
	if (rtab == NULL)
		return -EINVAL;

	if (classid) {
		err = -EINVAL;
		if (TC_H_MAJ(classid ^ sch->handle) ||
		    cbq_class_lookup(q, classid))
			goto failure;
	} else {
		int i;
		classid = TC_H_MAKE(sch->handle, 0x8000);

		for (i = 0; i < 0x8000; i++) {
			if (++q->hgenerator >= 0x8000)
				q->hgenerator = 1;
			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
				break;
		}
		err = -ENOSR;
		if (i >= 0x8000)
			goto failure;
		classid = classid|q->hgenerator;
	}

	parent = &q->link;
	if (parentid) {
		parent = cbq_class_lookup(q, parentid);
		err = -EINVAL;
		if (parent == NULL)
			goto failure;
	}

	err = -ENOBUFS;
	cl = kzalloc(sizeof(*cl), GFP_KERNEL);
	if (cl == NULL)
		goto failure;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
					qdisc_root_sleeping_lock(sch),
					tca[TCA_RATE]);
		if (err) {
			kfree(cl);
			goto failure;
		}
	}

	cl->R_tab = rtab;
	rtab = NULL;
	cl->refcnt = 1;
	cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
	if (!cl->q)
		cl->q = &noop_qdisc;
	cl->common.classid = classid;
	cl->tparent = parent;
	cl->qdisc = sch;
	cl->allot = parent->allot;
	cl->quantum = cl->allot;
	cl->weight = cl->R_tab->rate.rate;

	sch_tree_lock(sch);
	cbq_link_class(cl);
	cl->borrow = cl->tparent;
	if (cl->tparent != &q->link)
		cl->share = cl->tparent;
	cbq_adjust_levels(parent);
	cl->minidle = -0x7FFFFFFF;
	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
	if (cl->ewma_log == 0)
		cl->ewma_log = q->link.ewma_log;
	if (cl->maxidle == 0)
		cl->maxidle = q->link.maxidle;
	if (cl->avpkt == 0)
		cl->avpkt = q->link.avpkt;
	cl->overlimit = cbq_ovl_classic;
	if (tb[TCA_CBQ_OVL_STRATEGY])
		cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
#ifdef CONFIG_NET_CLS_ACT
	if (tb[TCA_CBQ_POLICE])
		cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
#endif
	if (tb[TCA_CBQ_FOPT])
		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	qdisc_put_rtab(rtab);
	return err;
}

static int cbq_delete(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;
	unsigned int qlen;

	if (cl->filters || cl->children || cl == &q->link)
		return -EBUSY;

	sch_tree_lock(sch);

	qlen = cl->q->q.qlen;
	qdisc_reset(cl->q);
	qdisc_tree_decrease_qlen(cl->q, qlen);

	if (cl->next_alive)
		cbq_deactivate_class(cl);

	if (q->tx_borrowed == cl)
		q->tx_borrowed = q->tx_class;
	if (q->tx_class == cl) {
		q->tx_class = NULL;
		q->tx_borrowed = NULL;
	}
#ifdef CONFIG_NET_CLS_ACT
	if (q->rx_class == cl)
		q->rx_class = NULL;
#endif

	cbq_unlink_class(cl);
	cbq_adjust_levels(cl->tparent);
	cl->defmap = 0;
	cbq_sync_defmap(cl);

	cbq_rmprio(q, cl);
	sch_tree_unlock(sch);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */

	return 0;
}

static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = (struct cbq_class *)arg;

	if (cl == NULL)
		cl = &q->link;

	return &cl->filter_list;
}

static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *p = (struct cbq_class *)parent;
	struct cbq_class *cl = cbq_class_lookup(q, classid);

	if (cl) {
		if (p && p->level <= cl->level)
			return 0;
		cl->filters++;
		return (unsigned long)cl;
	}
	return 0;
}

static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct cbq_class *cl = (struct cbq_class *)arg;

	cl->filters--;
}

static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl;
	struct hlist_node *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops cbq_class_ops = {
	.graft		=	cbq_graft,
	.leaf		=	cbq_leaf,
	.qlen_notify	=	cbq_qlen_notify,
	.get		=	cbq_get,
	.put		=	cbq_put,
	.change		=	cbq_change_class,
	.delete		=	cbq_delete,
	.walk		=	cbq_walk,
	.tcf_chain	=	cbq_find_tcf,
	.bind_tcf	=	cbq_bind_filter,
	.unbind_tcf	=	cbq_unbind_filter,
	.dump		=	cbq_dump_class,
	.dump_stats	=	cbq_dump_class_stats,
};

static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&cbq_class_ops,
	.id		=	"cbq",
	.priv_size	=	sizeof(struct cbq_sched_data),
	.enqueue	=	cbq_enqueue,
	.dequeue	=	cbq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	cbq_drop,
	.init		=	cbq_init,
	.reset		=	cbq_reset,
	.destroy	=	cbq_destroy,
	.change		=	NULL,
	.dump		=	cbq_dump,
	.dump_stats	=	cbq_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init cbq_module_init(void)
{
	return register_qdisc(&cbq_qdisc_ops);
}
static void __exit cbq_module_exit(void)
{
	unregister_qdisc(&cbq_qdisc_ops);
}
module_init(cbq_module_init)
module_exit(cbq_module_exit)
MODULE_LICENSE("GPL");