Merge remote-tracking branches 'spi/topic/bcm53xx', 'spi/topic/cadence', 'spi/topic...
[linux-2.6-block.git] / net / sched / sch_cbq.c
CommitLineData
1da177e4
LT
1/*
2 * net/sched/sch_cbq.c Class-Based Queueing discipline.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 */
12
1da177e4 13#include <linux/module.h>
5a0e3ad6 14#include <linux/slab.h>
1da177e4
LT
15#include <linux/types.h>
16#include <linux/kernel.h>
1da177e4 17#include <linux/string.h>
1da177e4 18#include <linux/errno.h>
1da177e4 19#include <linux/skbuff.h>
0ba48053 20#include <net/netlink.h>
1da177e4
LT
21#include <net/pkt_sched.h>
22
23
24/* Class-Based Queueing (CBQ) algorithm.
25 =======================================
26
27 Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
10297b99 28 Management Models for Packet Networks",
1da177e4
LT
29 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
30
10297b99 31 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
1da177e4 32
10297b99 33 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
1da177e4
LT
34 Parameters", 1996
35
36 [4] Sally Floyd and Michael Speer, "Experimental Results
37 for Class-Based Queueing", 1998, not published.
38
39 -----------------------------------------------------------------------
40
41 Algorithm skeleton was taken from NS simulator cbq.cc.
42 If someone wants to check this code against the LBL version,
43 he should take into account that ONLY the skeleton was borrowed,
44 the implementation is different. Particularly:
45
46 --- The WRR algorithm is different. Our version looks more
10297b99
YH
47 reasonable (I hope) and works when quanta are allowed to be
48 less than MTU, which is always the case when real time classes
49 have small rates. Note, that the statement of [3] is
50 incomplete, delay may actually be estimated even if class
51 per-round allotment is less than MTU. Namely, if per-round
52 allotment is W*r_i, and r_1+...+r_k = r < 1
1da177e4
LT
53
54 delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
55
56 In the worst case we have IntServ estimate with D = W*r+k*MTU
57 and C = MTU*r. The proof (if correct at all) is trivial.
58
59
60 --- It seems that cbq-2.0 is not very accurate. At least, I cannot
61 interpret some places, which look like wrong translations
62 from NS. Anyone is advised to find these differences
63 and explain to me, why I am wrong 8).
64
65 --- Linux has no EOI event, so that we cannot estimate true class
66 idle time. Workaround is to consider the next dequeue event
67 as sign that previous packet is finished. This is wrong because of
68 internal device queueing, but on a permanently loaded link it is true.
69 Moreover, combined with clock integrator, this scheme looks
70 very close to an ideal solution. */
71
72struct cbq_sched_data;
73
74
/* Per-class state: configuration, link-sharing tree pointers and
 * the run-time variables of the CBQ estimator.
 */
struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */
	unsigned char		ovl_strategy;	/* TC_CBQ_OVL_* overlimit policy selector */
#ifdef CONFIG_NET_CLS_ACT
	unsigned char		police;
#endif

	u32			defmap;		/* bitmap of priorities this class is default for */

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* Overlimit strategy parameters */
	void			(*overlimit)(struct cbq_class *cl);
	psched_tdiff_t		penalty;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue	qstats;
	struct gnet_stats_rate_est64 rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};
143
/* Qdisc-wide state: the class hash, the root ("link") class, the
 * per-priority active rings and the watchdog/penalty timers.
 */
struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;			/* root class, owns the physical link */

	unsigned int		activemask;		/* bit per priority band with backlog */
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;		/* class whose packet is in flight */
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;			/* Cached timestamp */
	unsigned int		pmask;			/* bit per priority band with penalized classes */

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;		/* Watchdog timer,
							   started when CBQ has
							   backlog, but cannot
							   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};
173
174
cc7ec456 175#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
1da177e4 176
cc7ec456 177static inline struct cbq_class *
1da177e4
LT
178cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
179{
d77fea2e 180 struct Qdisc_class_common *clc;
1da177e4 181
d77fea2e
PM
182 clc = qdisc_class_find(&q->clhash, classid);
183 if (clc == NULL)
184 return NULL;
185 return container_of(clc, struct cbq_class, common);
1da177e4
LT
186}
187
c3bc7cff 188#ifdef CONFIG_NET_CLS_ACT
1da177e4
LT
189
190static struct cbq_class *
191cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
192{
cc7ec456 193 struct cbq_class *cl;
1da177e4 194
cc7ec456
ED
195 for (cl = this->tparent; cl; cl = cl->tparent) {
196 struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
1da177e4 197
cc7ec456
ED
198 if (new != NULL && new != this)
199 return new;
200 }
1da177e4
LT
201 return NULL;
202}
203
204#endif
205
/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
 * so that it resolves to split nodes. Then packets are classified
 * by logical priority, or a more specific classifier may be attached
 * to the split node.
 */

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 * Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tc_classify(skb, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			/* Filter gave a classid, not a class pointer:
			 * resolve via lookup, or fall back to the split
			 * node's default map.
			 */
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		/* Result must descend the tree; anything else is bogus. */
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If classifier selected a link sharing class,
		 * apply agency specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}
294
295/*
cc7ec456
ED
296 * A packet has just been enqueued on the empty class.
297 * cbq_activate_class adds it to the tail of active class list
298 * of its priority band.
1da177e4
LT
299 */
300
cc7ec456 301static inline void cbq_activate_class(struct cbq_class *cl)
1da177e4
LT
302{
303 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
304 int prio = cl->cpriority;
305 struct cbq_class *cl_tail;
306
307 cl_tail = q->active[prio];
308 q->active[prio] = cl;
309
310 if (cl_tail != NULL) {
311 cl->next_alive = cl_tail->next_alive;
312 cl_tail->next_alive = cl;
313 } else {
314 cl->next_alive = cl;
315 q->activemask |= (1<<prio);
316 }
317}
318
/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	/* Walk the circular next_alive ring of this band until "this"
	 * is found, then splice it out.
	 */
	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				/* Ring's tail pointer referenced us: move it
				 * back to our predecessor.
				 */
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					/* We were the only member: the band
					 * is now empty; clear its mask bit.
					 */
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}
350
351static void
352cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
353{
354 int toplevel = q->toplevel;
355
fd245a4a 356 if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
7201c1dd 357 psched_time_t now = psched_get_time();
1da177e4
LT
358
359 do {
104e0878 360 if (cl->undertime < now) {
1da177e4
LT
361 q->toplevel = cl->level;
362 return;
363 }
cc7ec456 364 } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
1da177e4
LT
365 }
366}
367
/* Enqueue entry point: classify the packet, hand it to the chosen
 * class's child qdisc, and keep the active ring / toplevel state and
 * drop statistics consistent with the outcome.
 */
static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		/* No class: count a drop unless the classifier merely
		 * bypassed or stole the packet.
		 */
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}

#ifdef CONFIG_NET_CLS_ACT
	cl->q->__parent = sch;
#endif
	ret = qdisc_enqueue(skb, cl->q);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		/* First packet on an idle class: join the active ring. */
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}
404
/* Overlimit actions */

/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */

static void cbq_ovl_classic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * Class goes to sleep, so that it will have no
		 * chance to work avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a crap in this
		 * place, apparently they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	/* Keep the earliest wakeup requested so far in this dequeue pass. */
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * real available rate, rather than leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		/* Take the earliest undertime among borrow ancestors. */
		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
458
/* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
 * they go overlimit
 */

static void cbq_ovl_rclassic(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this = cl;

	/* Walk up the borrow chain.  NOTE(review): both exit paths leave
	 * cl == NULL (the break sets it, and the while condition only
	 * fails on NULL), so the fallback below always selects the
	 * original class — verify this matches the intended semantics.
	 */
	do {
		if (cl->level > q->toplevel) {
			cl = NULL;
			break;
		}
	} while ((cl = cl->borrow) != NULL);

	if (cl == NULL)
		cl = this;
	cbq_ovl_classic(cl);
}
479
/* TC_CBQ_OVL_DELAY: delay until it will go to underlimit */

static void cbq_ovl_delay(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	/* Never arm timers while the root qdisc is deactivated. */
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(cl->qdisc)->state))
		return;

	if (!cl->delayed) {
		psched_time_t sched = q->now;
		ktime_t expires;

		delay += cl->offtime;
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		cl->undertime = q->now + delay;

		if (delay > 0) {
			/* Park the class in the penalty band and arm the
			 * delay timer, pulling an already-armed timer
			 * earlier if it would fire after us.
			 */
			sched += delay + cl->penalty;
			cl->penalized = sched;
			cl->cpriority = TC_CBQ_MAXPRIO;
			q->pmask |= (1<<TC_CBQ_MAXPRIO);

			expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
			if (hrtimer_try_to_cancel(&q->delay_timer) &&
			    ktime_to_ns(ktime_sub(
					hrtimer_get_expires(&q->delay_timer),
					expires)) > 0)
				hrtimer_set_expires(&q->delay_timer, expires);
			hrtimer_restart(&q->delay_timer);
			cl->delayed = 1;
			cl->xstats.overactions++;
			return;
		}
		delay = 1;
	}
	/* Fall back to the qdisc watchdog for short delays. */
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;
}
524
525/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */
526
527static void cbq_ovl_lowprio(struct cbq_class *cl)
528{
529 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
530
1a13cb63 531 cl->penalized = q->now + cl->penalty;
1da177e4
LT
532
533 if (cl->cpriority != cl->priority2) {
534 cl->cpriority = cl->priority2;
535 q->pmask |= (1<<cl->cpriority);
536 cl->xstats.overactions++;
537 }
538 cbq_ovl_classic(cl);
539}
540
541/* TC_CBQ_OVL_DROP: penalize class by dropping */
542
543static void cbq_ovl_drop(struct cbq_class *cl)
544{
545 if (cl->q->ops->drop)
546 if (cl->q->ops->drop(cl->q))
547 cl->qdisc->q.qlen--;
548 cl->xstats.overactions++;
549 cbq_ovl_classic(cl);
550}
551
/* Release classes in band @prio whose penalty has expired, moving them
 * back to their own priority band.  Returns the remaining time until
 * the next expiry in this band, or 0 when nothing is left pending.
 */
static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
				       psched_time_t now)
{
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];
	psched_time_t sched = now;

	if (cl_prev == NULL)
		return 0;

	do {
		cl = cl_prev->next_alive;
		if (now - cl->penalized > 0) {
			/* Penalty expired: unlink from this ring and
			 * re-activate at the class's own priority.
			 */
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;
			cl->cpriority = cl->priority;
			cl->delayed = 0;
			cbq_activate_class(cl);

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					/* Ring emptied. */
					q->active[prio] = NULL;
					return 0;
				}
			}

			cl = cl_prev->next_alive;
		} else if (sched - cl->penalized > 0)
			sched = cl->penalized;
	} while ((cl_prev = cl) != q->active[prio]);

	return sched - now;
}
586
/* hrtimer callback: sweep all penalized priority bands, release expired
 * classes, re-arm the timer for the earliest remaining expiry, and kick
 * the device so transmission can resume.
 */
static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
{
	struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
						delay_timer);
	struct Qdisc *sch = q->watchdog.qdisc;
	psched_time_t now;
	psched_tdiff_t delay = 0;
	unsigned int pmask;

	now = psched_get_time();

	pmask = q->pmask;
	q->pmask = 0;

	/* Scan each marked band; collect the shortest remaining delay
	 * among bands that still hold penalized classes.
	 */
	while (pmask) {
		int prio = ffz(~pmask);
		psched_tdiff_t tmp;

		pmask &= ~(1<<prio);

		tmp = cbq_undelay_prio(q, prio, now);
		if (tmp > 0) {
			q->pmask |= 1<<prio;
			if (tmp < delay || delay == 0)
				delay = tmp;
		}
	}

	if (delay) {
		ktime_t time;

		/* Re-arm for the earliest pending expiry (absolute time). */
		time = ktime_set(0, 0);
		time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
	}

	qdisc_unthrottled(sch);
	__netif_schedule(qdisc_root(sch));
	return HRTIMER_NORESTART;
}
627
c3bc7cff 628#ifdef CONFIG_NET_CLS_ACT
1da177e4
LT
/* Called by a child qdisc that refused a packet: try to reclassify it
 * into an ancestor's best-effort default class and re-enqueue there.
 * Returns 0 when the packet was consumed (queued or counted as a
 * child-level drop), -1 when the caller must drop it.
 */
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
	struct Qdisc *sch = child->__parent;
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl = q->rx_class;

	q->rx_class = NULL;

	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
		int ret;

		cbq_mark_toplevel(q, cl);

		q->rx_class = cl;
		cl->q->__parent = sch;

		ret = qdisc_enqueue(skb, cl->q);
		if (ret == NET_XMIT_SUCCESS) {
			sch->q.qlen++;
			if (!cl->next_alive)
				cbq_activate_class(cl);
			return 0;
		}
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return 0;
	}

	qdisc_qstats_drop(sch);
	return -1;
}
660#endif
661
/*
 * It is mission critical procedure.
 *
 * We "regenerate" toplevel cutoff, if transmitting class
 * has backlog and it is not regulated. It is not part of
 * original CBQ description, but looks more reasonable.
 * Probably, it is wrong. This question needs further investigation.
 */

static inline void
cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
		    struct cbq_class *borrowed)
{
	if (cl && q->toplevel >= borrowed->level) {
		if (cl->q->q.qlen > 1) {
			/* Raise the cutoff to the first underlimit
			 * ancestor in the borrow chain.
			 */
			do {
				if (borrowed->undertime == PSCHED_PASTPERFECT) {
					q->toplevel = borrowed->level;
					return;
				}
			} while ((borrowed = borrowed->borrow) != NULL);
		}
#if 0
	/* It is not necessary now. Uncommenting it
	   will save CPU cycles, but decrease fairness.
	 */
		q->toplevel = TC_CBQ_MAXLEVEL;
#endif
	}
}
692
/* Account the just-transmitted packet (q->tx_class / q->tx_len) along
 * the link-sharing chain: update byte/packet stats and the avgidle
 * EWMA estimator, and recompute each class's undertime.
 */
static void
cbq_update(struct cbq_sched_data *q)
{
	struct cbq_class *this = q->tx_class;
	struct cbq_class *cl = this;
	int len = q->tx_len;
	psched_time_t now;

	q->tx_class = NULL;
	/* Time integrator. We calculate EOS time
	 * by adding expected packet transmission time.
	 */
	now = q->now + L2T(&q->link, len);

	for ( ; cl; cl = cl->share) {
		long avgidle = cl->avgidle;
		long idle;

		cl->bstats.packets++;
		cl->bstats.bytes += len;

		/*
		 * (now - last) is total time between packet right edges.
		 * (last_pktlen/rate) is "virtual" busy time, so that
		 *
		 * idle = (now - last) - last_pktlen/rate
		 */

		idle = now - cl->last;
		if ((unsigned long)idle > 128*1024*1024) {
			/* Huge gap: clamp the estimator instead of
			 * integrating a wildly stale interval.
			 */
			avgidle = cl->maxidle;
		} else {
			idle -= L2T(cl, len);

			/* true_avgidle := (1-W)*true_avgidle + W*idle,
			 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
			 * cl->avgidle == true_avgidle/W,
			 * hence:
			 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate expected time, when this class
			 * will be allowed to send.
			 * It will occur, when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime virtual clock,
			 * necessary to complete transmitted packet.
			 * (len/phys_bandwidth has been already passed
			 * to the moment of cbq_update)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		/* Only move "last" forward (signed wrap-safe compare). */
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}
781
/* Decide whether @cl may transmit now.  Returns @cl itself when it is
 * underlimit, an ancestor it may borrow from, or NULL when nothing in
 * the borrow chain can send (in which case the class's overlimit
 * action has been invoked).
 */
static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	/* The root class is never limited. */
	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* It is very suspicious place. Now overlimit
		 * action is generated for not bounded classes
		 * only if link is completely congested.
		 * Though it is in agree with ancestor-only paradigm,
		 * it looks very stupid. Particularly,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and, however,
		 * no another solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			this_cl->overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}
820
/* Weighted round-robin dequeue within one priority band: walk the
 * circular active ring, spending each class's deficit, refilling
 * quanta at round boundaries, and pruning empty/penalized classes.
 */
static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0
			 * f.e. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				/* Advance the tail pointer so the next
				 * dequeue starts at the following class.
				 */
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				/* Penalized but still backlogged: re-insert
				 * into its (new) effective priority band.
				 */
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}
923
cc7ec456 924static inline struct sk_buff *
1da177e4
LT
925cbq_dequeue_1(struct Qdisc *sch)
926{
927 struct cbq_sched_data *q = qdisc_priv(sch);
928 struct sk_buff *skb;
cc7ec456 929 unsigned int activemask;
1da177e4 930
cc7ec456 931 activemask = q->activemask & 0xFF;
1da177e4
LT
932 while (activemask) {
933 int prio = ffz(~activemask);
934 activemask &= ~(1<<prio);
935 skb = cbq_dequeue_prio(sch, prio);
936 if (skb)
937 return skb;
938 }
939 return NULL;
940}
941
/* Top-level dequeue: settle accounting for the previous packet, then
 * repeatedly attempt a dequeue pass, resetting the toplevel cutoff
 * between attempts; arm the watchdog when backlogged but blocked.
 */
static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	/* Account the previously transmitted packet before advancing
	 * the cached clock in q->now.
	 */
	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * It is possible, if:
		 *
		 * 1. Scheduler is empty.
		 * 2. Toplevel cutoff inhibited borrowing.
		 * 3. Root class is overlimit.
		 *
		 * Reset 2d and 3d conditions and retry.
		 *
		 * Note, that NS and cbq-2.0 are buggy, peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for toplevel algorithm.
		 *
		 * Our version is better, but slower, because it requires
		 * two passes, but it is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}
1005
1006/* CBQ class maintanance routines */
1007
1008static void cbq_adjust_levels(struct cbq_class *this)
1009{
1010 if (this == NULL)
1011 return;
1012
1013 do {
1014 int level = 0;
1015 struct cbq_class *cl;
1016
cc7ec456
ED
1017 cl = this->children;
1018 if (cl) {
1da177e4
LT
1019 do {
1020 if (cl->level > level)
1021 level = cl->level;
1022 } while ((cl = cl->sibling) != this->children);
1023 }
cc7ec456 1024 this->level = level + 1;
1da177e4
LT
1025 } while ((this = this->tparent) != NULL);
1026}
1027
/* Recompute the WRR quantum of every class in band @prio from its
 * weight and allot, then clamp obviously bogus values to a sane
 * MTU-derived default.
 */
static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffer of
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
1054
1055static void cbq_sync_defmap(struct cbq_class *cl)
1056{
1057 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
1058 struct cbq_class *split = cl->split;
cc7ec456 1059 unsigned int h;
1da177e4
LT
1060 int i;
1061
1062 if (split == NULL)
1063 return;
1064
cc7ec456
ED
1065 for (i = 0; i <= TC_PRIO_MAX; i++) {
1066 if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
1da177e4
LT
1067 split->defaults[i] = NULL;
1068 }
1069
cc7ec456 1070 for (i = 0; i <= TC_PRIO_MAX; i++) {
1da177e4
LT
1071 int level = split->level;
1072
1073 if (split->defaults[i])
1074 continue;
1075
d77fea2e 1076 for (h = 0; h < q->clhash.hashsize; h++) {
1da177e4
LT
1077 struct cbq_class *c;
1078
b67bfe0d 1079 hlist_for_each_entry(c, &q->clhash.hash[h],
d77fea2e 1080 common.hnode) {
1da177e4 1081 if (c->split == split && c->level < level &&
cc7ec456 1082 c->defmap & (1<<i)) {
1da177e4
LT
1083 split->defaults[i] = c;
1084 level = c->level;
1085 }
1086 }
1087 }
1088 }
1089}
1090
/* Change which priorities @cl is the default class for, possibly
 * moving it to a different split node, and resynchronize the split
 * node's default table.
 */
static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
{
	struct cbq_class *split = NULL;

	/* splitid == 0 means "keep the current split node". */
	if (splitid == 0) {
		split = cl->split;
		if (!split)
			return;
		splitid = split->common.classid;
	}

	if (split == NULL || split->common.classid != splitid) {
		/* Locate the requested split node among tree ancestors. */
		for (split = cl->tparent; split; split = split->tparent)
			if (split->common.classid == splitid)
				break;
	}

	if (split == NULL)
		return;

	if (cl->split != split) {
		/* Moving to a new split node: detach cleanly from the
		 * old one before installing the new defmap.
		 */
		cl->defmap = 0;
		cbq_sync_defmap(cl);
		cl->split = split;
		cl->defmap = def & mask;
	} else
		cl->defmap = (cl->defmap & ~mask) | (def & mask);

	cbq_sync_defmap(cl);
}
1121
1122static void cbq_unlink_class(struct cbq_class *this)
1123{
1124 struct cbq_class *cl, **clp;
1125 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
1126
d77fea2e 1127 qdisc_class_hash_remove(&q->clhash, &this->common);
1da177e4
LT
1128
1129 if (this->tparent) {
cc7ec456 1130 clp = &this->sibling;
1da177e4
LT
1131 cl = *clp;
1132 do {
1133 if (cl == this) {
1134 *clp = cl->sibling;
1135 break;
1136 }
1137 clp = &cl->sibling;
1138 } while ((cl = *clp) != this->sibling);
1139
1140 if (this->tparent->children == this) {
1141 this->tparent->children = this->sibling;
1142 if (this->sibling == this)
1143 this->tparent->children = NULL;
1144 }
1145 } else {
547b792c 1146 WARN_ON(this->sibling != this);
1da177e4
LT
1147 }
1148}
1149
1150static void cbq_link_class(struct cbq_class *this)
1151{
1152 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
1da177e4
LT
1153 struct cbq_class *parent = this->tparent;
1154
1155 this->sibling = this;
d77fea2e 1156 qdisc_class_hash_insert(&q->clhash, &this->common);
1da177e4
LT
1157
1158 if (parent == NULL)
1159 return;
1160
1161 if (parent->children == NULL) {
1162 parent->children = this;
1163 } else {
1164 this->sibling = parent->children->sibling;
1165 parent->children->sibling = this;
1166 }
1167}
1168
cc7ec456 1169static unsigned int cbq_drop(struct Qdisc *sch)
1da177e4
LT
1170{
1171 struct cbq_sched_data *q = qdisc_priv(sch);
1172 struct cbq_class *cl, *cl_head;
1173 int prio;
1174 unsigned int len;
1175
1176 for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
cc7ec456
ED
1177 cl_head = q->active[prio];
1178 if (!cl_head)
1da177e4
LT
1179 continue;
1180
1181 cl = cl_head;
1182 do {
1183 if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
1184 sch->q.qlen--;
a37ef2e3
JP
1185 if (!cl->q->q.qlen)
1186 cbq_deactivate_class(cl);
1da177e4
LT
1187 return len;
1188 }
1189 } while ((cl = cl->next_alive) != cl_head);
1190 }
1191 return 0;
1192}
1193
1194static void
cc7ec456 1195cbq_reset(struct Qdisc *sch)
1da177e4
LT
1196{
1197 struct cbq_sched_data *q = qdisc_priv(sch);
1198 struct cbq_class *cl;
1199 int prio;
cc7ec456 1200 unsigned int h;
1da177e4
LT
1201
1202 q->activemask = 0;
1203 q->pmask = 0;
1204 q->tx_class = NULL;
1205 q->tx_borrowed = NULL;
88a99354 1206 qdisc_watchdog_cancel(&q->watchdog);
2fbd3da3 1207 hrtimer_cancel(&q->delay_timer);
1da177e4 1208 q->toplevel = TC_CBQ_MAXLEVEL;
3bebcda2 1209 q->now = psched_get_time();
1da177e4
LT
1210
1211 for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
1212 q->active[prio] = NULL;
1213
d77fea2e 1214 for (h = 0; h < q->clhash.hashsize; h++) {
b67bfe0d 1215 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1da177e4
LT
1216 qdisc_reset(cl->q);
1217
1218 cl->next_alive = NULL;
a084980d 1219 cl->undertime = PSCHED_PASTPERFECT;
1da177e4
LT
1220 cl->avgidle = cl->maxidle;
1221 cl->deficit = cl->quantum;
1222 cl->cpriority = cl->priority;
1223 }
1224 }
1225 sch->q.qlen = 0;
1226}
1227
1228
1229static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
1230{
cc7ec456
ED
1231 if (lss->change & TCF_CBQ_LSS_FLAGS) {
1232 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
1233 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
1da177e4 1234 }
cc7ec456 1235 if (lss->change & TCF_CBQ_LSS_EWMA)
1da177e4 1236 cl->ewma_log = lss->ewma_log;
cc7ec456 1237 if (lss->change & TCF_CBQ_LSS_AVPKT)
1da177e4 1238 cl->avpkt = lss->avpkt;
cc7ec456 1239 if (lss->change & TCF_CBQ_LSS_MINIDLE)
1da177e4 1240 cl->minidle = -(long)lss->minidle;
cc7ec456 1241 if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
1da177e4
LT
1242 cl->maxidle = lss->maxidle;
1243 cl->avgidle = lss->maxidle;
1244 }
cc7ec456 1245 if (lss->change & TCF_CBQ_LSS_OFFTIME)
1da177e4
LT
1246 cl->offtime = lss->offtime;
1247 return 0;
1248}
1249
1250static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
1251{
1252 q->nclasses[cl->priority]--;
1253 q->quanta[cl->priority] -= cl->weight;
1254 cbq_normalize_quanta(q, cl->priority);
1255}
1256
1257static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
1258{
1259 q->nclasses[cl->priority]++;
1260 q->quanta[cl->priority] += cl->weight;
1261 cbq_normalize_quanta(q, cl->priority);
1262}
1263
1264static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
1265{
1266 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
1267
1268 if (wrr->allot)
1269 cl->allot = wrr->allot;
1270 if (wrr->weight)
1271 cl->weight = wrr->weight;
1272 if (wrr->priority) {
cc7ec456 1273 cl->priority = wrr->priority - 1;
1da177e4
LT
1274 cl->cpriority = cl->priority;
1275 if (cl->priority >= cl->priority2)
cc7ec456 1276 cl->priority2 = TC_CBQ_MAXPRIO - 1;
1da177e4
LT
1277 }
1278
1279 cbq_addprio(q, cl);
1280 return 0;
1281}
1282
1283static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
1284{
1285 switch (ovl->strategy) {
1286 case TC_CBQ_OVL_CLASSIC:
1287 cl->overlimit = cbq_ovl_classic;
1288 break;
1289 case TC_CBQ_OVL_DELAY:
1290 cl->overlimit = cbq_ovl_delay;
1291 break;
1292 case TC_CBQ_OVL_LOWPRIO:
cc7ec456
ED
1293 if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
1294 ovl->priority2 - 1 <= cl->priority)
1da177e4 1295 return -EINVAL;
cc7ec456 1296 cl->priority2 = ovl->priority2 - 1;
1da177e4
LT
1297 cl->overlimit = cbq_ovl_lowprio;
1298 break;
1299 case TC_CBQ_OVL_DROP:
1300 cl->overlimit = cbq_ovl_drop;
1301 break;
1302 case TC_CBQ_OVL_RCLASSIC:
1303 cl->overlimit = cbq_ovl_rclassic;
1304 break;
1305 default:
1306 return -EINVAL;
1307 }
1a13cb63 1308 cl->penalty = ovl->penalty;
1da177e4
LT
1309 return 0;
1310}
1311
c3bc7cff 1312#ifdef CONFIG_NET_CLS_ACT
1da177e4
LT
1313static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
1314{
1315 cl->police = p->police;
1316
1317 if (cl->q->handle) {
1318 if (p->police == TC_POLICE_RECLASSIFY)
1319 cl->q->reshape_fail = cbq_reshape_fail;
1320 else
1321 cl->q->reshape_fail = NULL;
1322 }
1323 return 0;
1324}
1325#endif
1326
1327static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
1328{
1329 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
1330 return 0;
1331}
1332
27a3421e
PM
1333static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
1334 [TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) },
1335 [TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) },
1336 [TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) },
1337 [TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) },
1338 [TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) },
1339 [TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1340 [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
1341};
1342
1e90474c 1343static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1da177e4
LT
1344{
1345 struct cbq_sched_data *q = qdisc_priv(sch);
1e90474c 1346 struct nlattr *tb[TCA_CBQ_MAX + 1];
1da177e4 1347 struct tc_ratespec *r;
cee63723
PM
1348 int err;
1349
27a3421e 1350 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
cee63723
PM
1351 if (err < 0)
1352 return err;
1da177e4 1353
27a3421e 1354 if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
1da177e4
LT
1355 return -EINVAL;
1356
1e90474c 1357 r = nla_data(tb[TCA_CBQ_RATE]);
1da177e4 1358
1e90474c 1359 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
1da177e4
LT
1360 return -EINVAL;
1361
d77fea2e
PM
1362 err = qdisc_class_hash_init(&q->clhash);
1363 if (err < 0)
1364 goto put_rtab;
1365
1da177e4
LT
1366 q->link.refcnt = 1;
1367 q->link.sibling = &q->link;
d77fea2e 1368 q->link.common.classid = sch->handle;
1da177e4 1369 q->link.qdisc = sch;
3511c913
CG
1370 q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1371 sch->handle);
1372 if (!q->link.q)
1da177e4
LT
1373 q->link.q = &noop_qdisc;
1374
cc7ec456
ED
1375 q->link.priority = TC_CBQ_MAXPRIO - 1;
1376 q->link.priority2 = TC_CBQ_MAXPRIO - 1;
1377 q->link.cpriority = TC_CBQ_MAXPRIO - 1;
1da177e4
LT
1378 q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
1379 q->link.overlimit = cbq_ovl_classic;
5ce2d488 1380 q->link.allot = psched_mtu(qdisc_dev(sch));
1da177e4
LT
1381 q->link.quantum = q->link.allot;
1382 q->link.weight = q->link.R_tab->rate.rate;
1383
1384 q->link.ewma_log = TC_CBQ_DEF_EWMA;
1385 q->link.avpkt = q->link.allot/2;
1386 q->link.minidle = -0x7FFFFFFF;
1da177e4 1387
88a99354 1388 qdisc_watchdog_init(&q->watchdog, sch);
4a8e320c 1389 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1da177e4
LT
1390 q->delay_timer.function = cbq_undelay;
1391 q->toplevel = TC_CBQ_MAXLEVEL;
3bebcda2 1392 q->now = psched_get_time();
1da177e4
LT
1393
1394 cbq_link_class(&q->link);
1395
1e90474c
PM
1396 if (tb[TCA_CBQ_LSSOPT])
1397 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
1da177e4
LT
1398
1399 cbq_addprio(q, &q->link);
1400 return 0;
d77fea2e
PM
1401
1402put_rtab:
1403 qdisc_put_rtab(q->link.R_tab);
1404 return err;
1da177e4
LT
1405}
1406
cc7ec456 1407static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
1da177e4 1408{
27a884dc 1409 unsigned char *b = skb_tail_pointer(skb);
1da177e4 1410
1b34ec43
DM
1411 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
1412 goto nla_put_failure;
1da177e4
LT
1413 return skb->len;
1414
1e90474c 1415nla_put_failure:
dc5fc579 1416 nlmsg_trim(skb, b);
1da177e4
LT
1417 return -1;
1418}
1419
cc7ec456 1420static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
1da177e4 1421{
27a884dc 1422 unsigned char *b = skb_tail_pointer(skb);
1da177e4
LT
1423 struct tc_cbq_lssopt opt;
1424
1425 opt.flags = 0;
1426 if (cl->borrow == NULL)
1427 opt.flags |= TCF_CBQ_LSS_BOUNDED;
1428 if (cl->share == NULL)
1429 opt.flags |= TCF_CBQ_LSS_ISOLATED;
1430 opt.ewma_log = cl->ewma_log;
1431 opt.level = cl->level;
1432 opt.avpkt = cl->avpkt;
1433 opt.maxidle = cl->maxidle;
1434 opt.minidle = (u32)(-cl->minidle);
1435 opt.offtime = cl->offtime;
1436 opt.change = ~0;
1b34ec43
DM
1437 if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
1438 goto nla_put_failure;
1da177e4
LT
1439 return skb->len;
1440
1e90474c 1441nla_put_failure:
dc5fc579 1442 nlmsg_trim(skb, b);
1da177e4
LT
1443 return -1;
1444}
1445
cc7ec456 1446static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
1da177e4 1447{
27a884dc 1448 unsigned char *b = skb_tail_pointer(skb);
1da177e4
LT
1449 struct tc_cbq_wrropt opt;
1450
a0db856a 1451 memset(&opt, 0, sizeof(opt));
1da177e4
LT
1452 opt.flags = 0;
1453 opt.allot = cl->allot;
cc7ec456
ED
1454 opt.priority = cl->priority + 1;
1455 opt.cpriority = cl->cpriority + 1;
1da177e4 1456 opt.weight = cl->weight;
1b34ec43
DM
1457 if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
1458 goto nla_put_failure;
1da177e4
LT
1459 return skb->len;
1460
1e90474c 1461nla_put_failure:
dc5fc579 1462 nlmsg_trim(skb, b);
1da177e4
LT
1463 return -1;
1464}
1465
cc7ec456 1466static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
1da177e4 1467{
27a884dc 1468 unsigned char *b = skb_tail_pointer(skb);
1da177e4
LT
1469 struct tc_cbq_ovl opt;
1470
1471 opt.strategy = cl->ovl_strategy;
cc7ec456 1472 opt.priority2 = cl->priority2 + 1;
8a47077a 1473 opt.pad = 0;
1a13cb63 1474 opt.penalty = cl->penalty;
1b34ec43
DM
1475 if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
1476 goto nla_put_failure;
1da177e4
LT
1477 return skb->len;
1478
1e90474c 1479nla_put_failure:
dc5fc579 1480 nlmsg_trim(skb, b);
1da177e4
LT
1481 return -1;
1482}
1483
cc7ec456 1484static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
1da177e4 1485{
27a884dc 1486 unsigned char *b = skb_tail_pointer(skb);
1da177e4
LT
1487 struct tc_cbq_fopt opt;
1488
1489 if (cl->split || cl->defmap) {
d77fea2e 1490 opt.split = cl->split ? cl->split->common.classid : 0;
1da177e4
LT
1491 opt.defmap = cl->defmap;
1492 opt.defchange = ~0;
1b34ec43
DM
1493 if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
1494 goto nla_put_failure;
1da177e4
LT
1495 }
1496 return skb->len;
1497
1e90474c 1498nla_put_failure:
dc5fc579 1499 nlmsg_trim(skb, b);
1da177e4
LT
1500 return -1;
1501}
1502
c3bc7cff 1503#ifdef CONFIG_NET_CLS_ACT
cc7ec456 1504static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
1da177e4 1505{
27a884dc 1506 unsigned char *b = skb_tail_pointer(skb);
1da177e4
LT
1507 struct tc_cbq_police opt;
1508
1509 if (cl->police) {
1510 opt.police = cl->police;
9ef1d4c7
PM
1511 opt.__res1 = 0;
1512 opt.__res2 = 0;
1b34ec43
DM
1513 if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
1514 goto nla_put_failure;
1da177e4
LT
1515 }
1516 return skb->len;
1517
1e90474c 1518nla_put_failure:
dc5fc579 1519 nlmsg_trim(skb, b);
1da177e4
LT
1520 return -1;
1521}
1522#endif
1523
/* Dump all per-class attributes; returns 0 or -1 on the first failure. */
static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
{
	if (cbq_dump_lss(skb, cl) < 0 ||
	    cbq_dump_rate(skb, cl) < 0 ||
	    cbq_dump_wrr(skb, cl) < 0 ||
	    cbq_dump_ovl(skb, cl) < 0 ||
#ifdef CONFIG_NET_CLS_ACT
	    cbq_dump_police(skb, cl) < 0 ||
#endif
	    cbq_dump_fopt(skb, cl) < 0)
		return -1;
	return 0;
}
1537
1538static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
1539{
1540 struct cbq_sched_data *q = qdisc_priv(sch);
4b3550ef 1541 struct nlattr *nest;
1da177e4 1542
4b3550ef
PM
1543 nest = nla_nest_start(skb, TCA_OPTIONS);
1544 if (nest == NULL)
1545 goto nla_put_failure;
1da177e4 1546 if (cbq_dump_attr(skb, &q->link) < 0)
1e90474c 1547 goto nla_put_failure;
d59b7d80 1548 return nla_nest_end(skb, nest);
1da177e4 1549
1e90474c 1550nla_put_failure:
4b3550ef 1551 nla_nest_cancel(skb, nest);
1da177e4
LT
1552 return -1;
1553}
1554
1555static int
1556cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1557{
1558 struct cbq_sched_data *q = qdisc_priv(sch);
1559
1560 q->link.xstats.avgidle = q->link.avgidle;
1561 return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
1562}
1563
1564static int
1565cbq_dump_class(struct Qdisc *sch, unsigned long arg,
1566 struct sk_buff *skb, struct tcmsg *tcm)
1567{
cc7ec456 1568 struct cbq_class *cl = (struct cbq_class *)arg;
4b3550ef 1569 struct nlattr *nest;
1da177e4
LT
1570
1571 if (cl->tparent)
d77fea2e 1572 tcm->tcm_parent = cl->tparent->common.classid;
1da177e4
LT
1573 else
1574 tcm->tcm_parent = TC_H_ROOT;
d77fea2e 1575 tcm->tcm_handle = cl->common.classid;
1da177e4
LT
1576 tcm->tcm_info = cl->q->handle;
1577
4b3550ef
PM
1578 nest = nla_nest_start(skb, TCA_OPTIONS);
1579 if (nest == NULL)
1580 goto nla_put_failure;
1da177e4 1581 if (cbq_dump_attr(skb, cl) < 0)
1e90474c 1582 goto nla_put_failure;
d59b7d80 1583 return nla_nest_end(skb, nest);
1da177e4 1584
1e90474c 1585nla_put_failure:
4b3550ef 1586 nla_nest_cancel(skb, nest);
1da177e4
LT
1587 return -1;
1588}
1589
1590static int
1591cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1592 struct gnet_dump *d)
1593{
1594 struct cbq_sched_data *q = qdisc_priv(sch);
cc7ec456 1595 struct cbq_class *cl = (struct cbq_class *)arg;
1da177e4 1596
1da177e4
LT
1597 cl->xstats.avgidle = cl->avgidle;
1598 cl->xstats.undertime = 0;
1599
a084980d 1600 if (cl->undertime != PSCHED_PASTPERFECT)
8edc0c31 1601 cl->xstats.undertime = cl->undertime - q->now;
1da177e4 1602
22e0f8b9 1603 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
d250a5f9 1604 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
b0ab6f92 1605 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
1da177e4
LT
1606 return -1;
1607
1608 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1609}
1610
1611static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1612 struct Qdisc **old)
1613{
cc7ec456 1614 struct cbq_class *cl = (struct cbq_class *)arg;
1da177e4 1615
5b9a9ccf 1616 if (new == NULL) {
3511c913 1617 new = qdisc_create_dflt(sch->dev_queue,
5b9a9ccf
PM
1618 &pfifo_qdisc_ops, cl->common.classid);
1619 if (new == NULL)
1620 return -ENOBUFS;
1621 } else {
c3bc7cff 1622#ifdef CONFIG_NET_CLS_ACT
5b9a9ccf
PM
1623 if (cl->police == TC_POLICE_RECLASSIFY)
1624 new->reshape_fail = cbq_reshape_fail;
1da177e4 1625#endif
1da177e4 1626 }
5b9a9ccf 1627
86a7996c 1628 *old = qdisc_replace(sch, new, &cl->q);
5b9a9ccf 1629 return 0;
1da177e4
LT
1630}
1631
cc7ec456 1632static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
1da177e4 1633{
cc7ec456 1634 struct cbq_class *cl = (struct cbq_class *)arg;
1da177e4 1635
5b9a9ccf 1636 return cl->q;
1da177e4
LT
1637}
1638
a37ef2e3
JP
1639static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1640{
1641 struct cbq_class *cl = (struct cbq_class *)arg;
1642
1643 if (cl->q->q.qlen == 0)
1644 cbq_deactivate_class(cl);
1645}
1646
1da177e4
LT
1647static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
1648{
1649 struct cbq_sched_data *q = qdisc_priv(sch);
1650 struct cbq_class *cl = cbq_class_lookup(q, classid);
1651
1652 if (cl) {
1653 cl->refcnt++;
1654 return (unsigned long)cl;
1655 }
1656 return 0;
1657}
1658
1da177e4
LT
1659static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1660{
1661 struct cbq_sched_data *q = qdisc_priv(sch);
1662
547b792c 1663 WARN_ON(cl->filters);
1da177e4 1664
ff31ab56 1665 tcf_destroy_chain(&cl->filter_list);
1da177e4
LT
1666 qdisc_destroy(cl->q);
1667 qdisc_put_rtab(cl->R_tab);
1da177e4 1668 gen_kill_estimator(&cl->bstats, &cl->rate_est);
1da177e4
LT
1669 if (cl != &q->link)
1670 kfree(cl);
1671}
1672
cc7ec456 1673static void cbq_destroy(struct Qdisc *sch)
1da177e4
LT
1674{
1675 struct cbq_sched_data *q = qdisc_priv(sch);
b67bfe0d 1676 struct hlist_node *next;
1da177e4 1677 struct cbq_class *cl;
cc7ec456 1678 unsigned int h;
1da177e4 1679
c3bc7cff 1680#ifdef CONFIG_NET_CLS_ACT
1da177e4
LT
1681 q->rx_class = NULL;
1682#endif
1683 /*
1684 * Filters must be destroyed first because we don't destroy the
1685 * classes from root to leafs which means that filters can still
1686 * be bound to classes which have been destroyed already. --TGR '04
1687 */
d77fea2e 1688 for (h = 0; h < q->clhash.hashsize; h++) {
b67bfe0d 1689 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
ff31ab56 1690 tcf_destroy_chain(&cl->filter_list);
b00b4bf9 1691 }
d77fea2e 1692 for (h = 0; h < q->clhash.hashsize; h++) {
b67bfe0d 1693 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
d77fea2e 1694 common.hnode)
1da177e4 1695 cbq_destroy_class(sch, cl);
1da177e4 1696 }
d77fea2e 1697 qdisc_class_hash_destroy(&q->clhash);
1da177e4
LT
1698}
1699
1700static void cbq_put(struct Qdisc *sch, unsigned long arg)
1701{
cc7ec456 1702 struct cbq_class *cl = (struct cbq_class *)arg;
1da177e4
LT
1703
1704 if (--cl->refcnt == 0) {
c3bc7cff 1705#ifdef CONFIG_NET_CLS_ACT
102396ae 1706 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1da177e4
LT
1707 struct cbq_sched_data *q = qdisc_priv(sch);
1708
7698b4fc 1709 spin_lock_bh(root_lock);
1da177e4
LT
1710 if (q->rx_class == cl)
1711 q->rx_class = NULL;
7698b4fc 1712 spin_unlock_bh(root_lock);
1da177e4
LT
1713#endif
1714
1715 cbq_destroy_class(sch, cl);
1716 }
1717}
1718
1719static int
1e90474c 1720cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
1da177e4
LT
1721 unsigned long *arg)
1722{
1723 int err;
1724 struct cbq_sched_data *q = qdisc_priv(sch);
cc7ec456 1725 struct cbq_class *cl = (struct cbq_class *)*arg;
1e90474c
PM
1726 struct nlattr *opt = tca[TCA_OPTIONS];
1727 struct nlattr *tb[TCA_CBQ_MAX + 1];
1da177e4
LT
1728 struct cbq_class *parent;
1729 struct qdisc_rate_table *rtab = NULL;
1730
cee63723 1731 if (opt == NULL)
1da177e4
LT
1732 return -EINVAL;
1733
27a3421e 1734 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
cee63723
PM
1735 if (err < 0)
1736 return err;
1737
1da177e4
LT
1738 if (cl) {
1739 /* Check parent */
1740 if (parentid) {
d77fea2e
PM
1741 if (cl->tparent &&
1742 cl->tparent->common.classid != parentid)
1da177e4
LT
1743 return -EINVAL;
1744 if (!cl->tparent && parentid != TC_H_ROOT)
1745 return -EINVAL;
1746 }
1747
1e90474c 1748 if (tb[TCA_CBQ_RATE]) {
71bcb09a
SH
1749 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
1750 tb[TCA_CBQ_RTAB]);
1da177e4
LT
1751 if (rtab == NULL)
1752 return -EINVAL;
1753 }
1754
71bcb09a 1755 if (tca[TCA_RATE]) {
22e0f8b9
JF
1756 err = gen_replace_estimator(&cl->bstats, NULL,
1757 &cl->rate_est,
71bcb09a
SH
1758 qdisc_root_sleeping_lock(sch),
1759 tca[TCA_RATE]);
1760 if (err) {
79c11f2e 1761 qdisc_put_rtab(rtab);
71bcb09a
SH
1762 return err;
1763 }
1764 }
1765
1da177e4
LT
1766 /* Change class parameters */
1767 sch_tree_lock(sch);
1768
1769 if (cl->next_alive != NULL)
1770 cbq_deactivate_class(cl);
1771
1772 if (rtab) {
b94c8afc
PM
1773 qdisc_put_rtab(cl->R_tab);
1774 cl->R_tab = rtab;
1da177e4
LT
1775 }
1776
1e90474c
PM
1777 if (tb[TCA_CBQ_LSSOPT])
1778 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1da177e4 1779
1e90474c 1780 if (tb[TCA_CBQ_WRROPT]) {
1da177e4 1781 cbq_rmprio(q, cl);
1e90474c 1782 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1da177e4
LT
1783 }
1784
1e90474c
PM
1785 if (tb[TCA_CBQ_OVL_STRATEGY])
1786 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
1da177e4 1787
c3bc7cff 1788#ifdef CONFIG_NET_CLS_ACT
1e90474c
PM
1789 if (tb[TCA_CBQ_POLICE])
1790 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
1da177e4
LT
1791#endif
1792
1e90474c
PM
1793 if (tb[TCA_CBQ_FOPT])
1794 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1da177e4
LT
1795
1796 if (cl->q->q.qlen)
1797 cbq_activate_class(cl);
1798
1799 sch_tree_unlock(sch);
1800
1da177e4
LT
1801 return 0;
1802 }
1803
1804 if (parentid == TC_H_ROOT)
1805 return -EINVAL;
1806
1e90474c
PM
1807 if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
1808 tb[TCA_CBQ_LSSOPT] == NULL)
1da177e4
LT
1809 return -EINVAL;
1810
1e90474c 1811 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
1da177e4
LT
1812 if (rtab == NULL)
1813 return -EINVAL;
1814
1815 if (classid) {
1816 err = -EINVAL;
cc7ec456
ED
1817 if (TC_H_MAJ(classid ^ sch->handle) ||
1818 cbq_class_lookup(q, classid))
1da177e4
LT
1819 goto failure;
1820 } else {
1821 int i;
cc7ec456 1822 classid = TC_H_MAKE(sch->handle, 0x8000);
1da177e4 1823
cc7ec456 1824 for (i = 0; i < 0x8000; i++) {
1da177e4
LT
1825 if (++q->hgenerator >= 0x8000)
1826 q->hgenerator = 1;
1827 if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
1828 break;
1829 }
1830 err = -ENOSR;
1831 if (i >= 0x8000)
1832 goto failure;
1833 classid = classid|q->hgenerator;
1834 }
1835
1836 parent = &q->link;
1837 if (parentid) {
1838 parent = cbq_class_lookup(q, parentid);
1839 err = -EINVAL;
1840 if (parent == NULL)
1841 goto failure;
1842 }
1843
1844 err = -ENOBUFS;
0da974f4 1845 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1da177e4
LT
1846 if (cl == NULL)
1847 goto failure;
71bcb09a
SH
1848
1849 if (tca[TCA_RATE]) {
22e0f8b9 1850 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
71bcb09a
SH
1851 qdisc_root_sleeping_lock(sch),
1852 tca[TCA_RATE]);
1853 if (err) {
1854 kfree(cl);
1855 goto failure;
1856 }
1857 }
1858
1da177e4
LT
1859 cl->R_tab = rtab;
1860 rtab = NULL;
1861 cl->refcnt = 1;
3511c913
CG
1862 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
1863 if (!cl->q)
1da177e4 1864 cl->q = &noop_qdisc;
d77fea2e 1865 cl->common.classid = classid;
1da177e4
LT
1866 cl->tparent = parent;
1867 cl->qdisc = sch;
1868 cl->allot = parent->allot;
1869 cl->quantum = cl->allot;
1870 cl->weight = cl->R_tab->rate.rate;
1da177e4
LT
1871
1872 sch_tree_lock(sch);
1873 cbq_link_class(cl);
1874 cl->borrow = cl->tparent;
1875 if (cl->tparent != &q->link)
1876 cl->share = cl->tparent;
1877 cbq_adjust_levels(parent);
1878 cl->minidle = -0x7FFFFFFF;
1e90474c
PM
1879 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1880 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
cc7ec456 1881 if (cl->ewma_log == 0)
1da177e4 1882 cl->ewma_log = q->link.ewma_log;
cc7ec456 1883 if (cl->maxidle == 0)
1da177e4 1884 cl->maxidle = q->link.maxidle;
cc7ec456 1885 if (cl->avpkt == 0)
1da177e4
LT
1886 cl->avpkt = q->link.avpkt;
1887 cl->overlimit = cbq_ovl_classic;
1e90474c
PM
1888 if (tb[TCA_CBQ_OVL_STRATEGY])
1889 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
c3bc7cff 1890#ifdef CONFIG_NET_CLS_ACT
1e90474c
PM
1891 if (tb[TCA_CBQ_POLICE])
1892 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
1da177e4 1893#endif
1e90474c
PM
1894 if (tb[TCA_CBQ_FOPT])
1895 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1da177e4
LT
1896 sch_tree_unlock(sch);
1897
d77fea2e
PM
1898 qdisc_class_hash_grow(sch, &q->clhash);
1899
1da177e4
LT
1900 *arg = (unsigned long)cl;
1901 return 0;
1902
1903failure:
1904 qdisc_put_rtab(rtab);
1905 return err;
1906}
1907
1908static int cbq_delete(struct Qdisc *sch, unsigned long arg)
1909{
1910 struct cbq_sched_data *q = qdisc_priv(sch);
cc7ec456 1911 struct cbq_class *cl = (struct cbq_class *)arg;
2ccccf5f 1912 unsigned int qlen, backlog;
1da177e4
LT
1913
1914 if (cl->filters || cl->children || cl == &q->link)
1915 return -EBUSY;
1916
1917 sch_tree_lock(sch);
1918
a37ef2e3 1919 qlen = cl->q->q.qlen;
2ccccf5f 1920 backlog = cl->q->qstats.backlog;
a37ef2e3 1921 qdisc_reset(cl->q);
2ccccf5f 1922 qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
a37ef2e3 1923
1da177e4
LT
1924 if (cl->next_alive)
1925 cbq_deactivate_class(cl);
1926
1927 if (q->tx_borrowed == cl)
1928 q->tx_borrowed = q->tx_class;
1929 if (q->tx_class == cl) {
1930 q->tx_class = NULL;
1931 q->tx_borrowed = NULL;
1932 }
c3bc7cff 1933#ifdef CONFIG_NET_CLS_ACT
1da177e4
LT
1934 if (q->rx_class == cl)
1935 q->rx_class = NULL;
1936#endif
1937
1938 cbq_unlink_class(cl);
1939 cbq_adjust_levels(cl->tparent);
1940 cl->defmap = 0;
1941 cbq_sync_defmap(cl);
1942
1943 cbq_rmprio(q, cl);
1944 sch_tree_unlock(sch);
1945
7cd0a638
JP
1946 BUG_ON(--cl->refcnt == 0);
1947 /*
1948 * This shouldn't happen: we "hold" one cops->get() when called
1949 * from tc_ctl_tclass; the destroy method is done from cops->put().
1950 */
1da177e4
LT
1951
1952 return 0;
1953}
1954
25d8c0d5
JF
1955static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
1956 unsigned long arg)
1da177e4
LT
1957{
1958 struct cbq_sched_data *q = qdisc_priv(sch);
1959 struct cbq_class *cl = (struct cbq_class *)arg;
1960
1961 if (cl == NULL)
1962 cl = &q->link;
1963
1964 return &cl->filter_list;
1965}
1966
1967static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
1968 u32 classid)
1969{
1970 struct cbq_sched_data *q = qdisc_priv(sch);
cc7ec456 1971 struct cbq_class *p = (struct cbq_class *)parent;
1da177e4
LT
1972 struct cbq_class *cl = cbq_class_lookup(q, classid);
1973
1974 if (cl) {
1975 if (p && p->level <= cl->level)
1976 return 0;
1977 cl->filters++;
1978 return (unsigned long)cl;
1979 }
1980 return 0;
1981}
1982
1983static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
1984{
cc7ec456 1985 struct cbq_class *cl = (struct cbq_class *)arg;
1da177e4
LT
1986
1987 cl->filters--;
1988}
1989
1990static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1991{
1992 struct cbq_sched_data *q = qdisc_priv(sch);
d77fea2e 1993 struct cbq_class *cl;
cc7ec456 1994 unsigned int h;
1da177e4
LT
1995
1996 if (arg->stop)
1997 return;
1998
d77fea2e 1999 for (h = 0; h < q->clhash.hashsize; h++) {
b67bfe0d 2000 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1da177e4
LT
2001 if (arg->count < arg->skip) {
2002 arg->count++;
2003 continue;
2004 }
2005 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
2006 arg->stop = 1;
2007 return;
2008 }
2009 arg->count++;
2010 }
2011 }
2012}
2013
20fea08b 2014static const struct Qdisc_class_ops cbq_class_ops = {
1da177e4
LT
2015 .graft = cbq_graft,
2016 .leaf = cbq_leaf,
a37ef2e3 2017 .qlen_notify = cbq_qlen_notify,
1da177e4
LT
2018 .get = cbq_get,
2019 .put = cbq_put,
2020 .change = cbq_change_class,
2021 .delete = cbq_delete,
2022 .walk = cbq_walk,
2023 .tcf_chain = cbq_find_tcf,
2024 .bind_tcf = cbq_bind_filter,
2025 .unbind_tcf = cbq_unbind_filter,
2026 .dump = cbq_dump_class,
2027 .dump_stats = cbq_dump_class_stats,
2028};
2029
20fea08b 2030static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
1da177e4
LT
2031 .next = NULL,
2032 .cl_ops = &cbq_class_ops,
2033 .id = "cbq",
2034 .priv_size = sizeof(struct cbq_sched_data),
2035 .enqueue = cbq_enqueue,
2036 .dequeue = cbq_dequeue,
77be155c 2037 .peek = qdisc_peek_dequeued,
1da177e4
LT
2038 .drop = cbq_drop,
2039 .init = cbq_init,
2040 .reset = cbq_reset,
2041 .destroy = cbq_destroy,
2042 .change = NULL,
2043 .dump = cbq_dump,
2044 .dump_stats = cbq_dump_stats,
2045 .owner = THIS_MODULE,
2046};
2047
2048static int __init cbq_module_init(void)
2049{
2050 return register_qdisc(&cbq_qdisc_ops);
2051}
10297b99 2052static void __exit cbq_module_exit(void)
1da177e4
LT
2053{
2054 unregister_qdisc(&cbq_qdisc_ops);
2055}
2056module_init(cbq_module_init)
2057module_exit(cbq_module_exit)
2058MODULE_LICENSE("GPL");