[NET_SCHED]: Fix prio/ingress classification logic error
[linux-2.6-block.git] / net / sched / sch_api.c
CommitLineData
1da177e4
LT
1/*
2 * net/sched/sch_api.c Packet scheduler API.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Fixes:
12 *
13 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
14 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
15 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
16 */
17
1da177e4
LT
18#include <linux/module.h>
19#include <linux/types.h>
20#include <linux/kernel.h>
1da177e4 21#include <linux/string.h>
1da177e4 22#include <linux/errno.h>
1da177e4 23#include <linux/skbuff.h>
1da177e4
LT
24#include <linux/init.h>
25#include <linux/proc_fs.h>
26#include <linux/seq_file.h>
27#include <linux/kmod.h>
28#include <linux/list.h>
4179477f 29#include <linux/hrtimer.h>
1da177e4 30
dc5fc579 31#include <net/netlink.h>
1da177e4
LT
32#include <net/pkt_sched.h>
33
1da177e4
LT
34static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
35 struct Qdisc *old, struct Qdisc *new);
36static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
37 struct Qdisc *q, unsigned long cl, int event);
38
39/*
40
41 Short review.
42 -------------
43
44 This file consists of two interrelated parts:
45
46 1. queueing disciplines manager frontend.
47 2. traffic classes manager frontend.
48
49 Generally, queueing discipline ("qdisc") is a black box,
50 which is able to enqueue packets and to dequeue them (when
51 device is ready to send something) in order and at times
52 determined by algorithm hidden in it.
53
54 qdisc's are divided to two categories:
55 - "queues", which have no internal structure visible from outside.
56 - "schedulers", which split all the packets to "traffic classes",
57 using "packet classifiers" (look at cls_api.c)
58
59 In turn, classes may have child qdiscs (as rule, queues)
60 attached to them etc. etc. etc.
61
62 The goal of the routines in this file is to translate
63 information supplied by user in the form of handles
64 to more intelligible for kernel form, to make some sanity
65 checks and part of work, which is common to all qdiscs
66 and to provide rtnetlink notifications.
67
68 All real intelligent work is done inside qdisc modules.
69
70
71
72 Every discipline has two major routines: enqueue and dequeue.
73
74 ---dequeue
75
76 dequeue usually returns a skb to send. It is allowed to return NULL,
77 but it does not mean that queue is empty, it just means that
78 discipline does not want to send anything this time.
79 Queue is really empty if q->q.qlen == 0.
80 For complicated disciplines with multiple queues q->q is not
81 real packet queue, but however q->q.qlen must be valid.
82
83 ---enqueue
84
85 enqueue returns 0, if packet was enqueued successfully.
86 If packet (this one or another one) was dropped, it returns
87 not zero error code.
88 NET_XMIT_DROP - this packet dropped
89 Expected action: do not backoff, but wait until queue will clear.
90 NET_XMIT_CN - probably this packet enqueued, but another one dropped.
91 Expected action: backoff or ignore
92 NET_XMIT_POLICED - dropped by police.
93 Expected action: backoff or error to real-time apps.
94
95 Auxiliary routines:
96
97 ---requeue
98
99 requeues once dequeued packet. It is used for non-standard or
100 just buggy devices, which can defer output even if dev->tbusy=0.
101
102 ---reset
103
104 returns qdisc to initial state: purge all buffers, clear all
105 timers, counters (except for statistics) etc.
106
107 ---init
108
109 initializes newly created qdisc.
110
111 ---destroy
112
113 destroys resources allocated by init and during lifetime of qdisc.
114
115 ---change
116
117 changes qdisc parameters.
118 */
119
120/* Protects list of registered TC modules. It is pure SMP lock. */
121static DEFINE_RWLOCK(qdisc_mod_lock);
122
123
124/************************************************
125 * Queueing disciplines manipulation. *
126 ************************************************/
127
128
129/* The list of all installed queueing disciplines. */
130
131static struct Qdisc_ops *qdisc_base;
132
/* Register/unregister queueing discipline */
134
135int register_qdisc(struct Qdisc_ops *qops)
136{
137 struct Qdisc_ops *q, **qp;
138 int rc = -EEXIST;
139
140 write_lock(&qdisc_mod_lock);
141 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
142 if (!strcmp(qops->id, q->id))
143 goto out;
144
145 if (qops->enqueue == NULL)
146 qops->enqueue = noop_qdisc_ops.enqueue;
147 if (qops->requeue == NULL)
148 qops->requeue = noop_qdisc_ops.requeue;
149 if (qops->dequeue == NULL)
150 qops->dequeue = noop_qdisc_ops.dequeue;
151
152 qops->next = NULL;
153 *qp = qops;
154 rc = 0;
155out:
156 write_unlock(&qdisc_mod_lock);
157 return rc;
158}
159
160int unregister_qdisc(struct Qdisc_ops *qops)
161{
162 struct Qdisc_ops *q, **qp;
163 int err = -ENOENT;
164
165 write_lock(&qdisc_mod_lock);
166 for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
167 if (q == qops)
168 break;
169 if (q) {
170 *qp = q->next;
171 q->next = NULL;
172 err = 0;
173 }
174 write_unlock(&qdisc_mod_lock);
175 return err;
176}
177
178/* We know handle. Find qdisc among all qdisc's attached to device
179 (root qdisc, all its children, children of children etc.)
180 */
181
0463d4ae 182struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
1da177e4
LT
183{
184 struct Qdisc *q;
185
1da177e4 186 list_for_each_entry(q, &dev->qdisc_list, list) {
43effa1e 187 if (q->handle == handle)
1da177e4 188 return q;
1da177e4 189 }
1da177e4
LT
190 return NULL;
191}
192
193static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
194{
195 unsigned long cl;
196 struct Qdisc *leaf;
197 struct Qdisc_class_ops *cops = p->ops->cl_ops;
198
199 if (cops == NULL)
200 return NULL;
201 cl = cops->get(p, classid);
202
203 if (cl == 0)
204 return NULL;
205 leaf = cops->leaf(p, cl);
206 cops->put(p, cl);
207 return leaf;
208}
209
210/* Find queueing discipline by name */
211
212static struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind)
213{
214 struct Qdisc_ops *q = NULL;
215
216 if (kind) {
217 read_lock(&qdisc_mod_lock);
218 for (q = qdisc_base; q; q = q->next) {
219 if (rtattr_strcmp(kind, q->id) == 0) {
220 if (!try_module_get(q->owner))
221 q = NULL;
222 break;
223 }
224 }
225 read_unlock(&qdisc_mod_lock);
226 }
227 return q;
228}
229
230static struct qdisc_rate_table *qdisc_rtab_list;
231
232struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct rtattr *tab)
233{
234 struct qdisc_rate_table *rtab;
235
236 for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
237 if (memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) == 0) {
238 rtab->refcnt++;
239 return rtab;
240 }
241 }
242
243 if (tab == NULL || r->rate == 0 || r->cell_log == 0 || RTA_PAYLOAD(tab) != 1024)
244 return NULL;
245
246 rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
247 if (rtab) {
248 rtab->rate = *r;
249 rtab->refcnt = 1;
250 memcpy(rtab->data, RTA_DATA(tab), 1024);
251 rtab->next = qdisc_rtab_list;
252 qdisc_rtab_list = rtab;
253 }
254 return rtab;
255}
256
257void qdisc_put_rtab(struct qdisc_rate_table *tab)
258{
259 struct qdisc_rate_table *rtab, **rtabp;
260
261 if (!tab || --tab->refcnt)
262 return;
263
264 for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) {
265 if (rtab == tab) {
266 *rtabp = rtab->next;
267 kfree(rtab);
268 return;
269 }
270 }
271}
272
4179477f
PM
273static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
274{
275 struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
276 timer);
1936502d 277 struct net_device *dev = wd->qdisc->dev;
4179477f
PM
278
279 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
11274e5a 280 smp_wmb();
0621ed2e 281 netif_schedule(dev);
1936502d 282
4179477f
PM
283 return HRTIMER_NORESTART;
284}
285
286void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
287{
288 hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
289 wd->timer.function = qdisc_watchdog;
290 wd->qdisc = qdisc;
291}
292EXPORT_SYMBOL(qdisc_watchdog_init);
293
294void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
295{
296 ktime_t time;
297
298 wd->qdisc->flags |= TCQ_F_THROTTLED;
299 time = ktime_set(0, 0);
300 time = ktime_add_ns(time, PSCHED_US2NS(expires));
301 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
302}
303EXPORT_SYMBOL(qdisc_watchdog_schedule);
304
305void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
306{
307 hrtimer_cancel(&wd->timer);
308 wd->qdisc->flags &= ~TCQ_F_THROTTLED;
309}
310EXPORT_SYMBOL(qdisc_watchdog_cancel);
1da177e4
LT
311
312/* Allocate an unique handle from space managed by kernel */
313
314static u32 qdisc_alloc_handle(struct net_device *dev)
315{
316 int i = 0x10000;
317 static u32 autohandle = TC_H_MAKE(0x80000000U, 0);
318
319 do {
320 autohandle += TC_H_MAKE(0x10000U, 0);
321 if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
322 autohandle = TC_H_MAKE(0x80000000U, 0);
323 } while (qdisc_lookup(dev, autohandle) && --i > 0);
324
325 return i>0 ? autohandle : 0;
326}
327
328/* Attach toplevel qdisc to device dev */
329
330static struct Qdisc *
331dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
332{
333 struct Qdisc *oqdisc;
334
335 if (dev->flags & IFF_UP)
336 dev_deactivate(dev);
337
338 qdisc_lock_tree(dev);
339 if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
340 oqdisc = dev->qdisc_ingress;
341 /* Prune old scheduler */
342 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
343 /* delete */
344 qdisc_reset(oqdisc);
345 dev->qdisc_ingress = NULL;
346 } else { /* new */
347 dev->qdisc_ingress = qdisc;
348 }
349
350 } else {
351
352 oqdisc = dev->qdisc_sleeping;
353
354 /* Prune old scheduler */
355 if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
356 qdisc_reset(oqdisc);
357
358 /* ... and graft new one */
359 if (qdisc == NULL)
360 qdisc = &noop_qdisc;
361 dev->qdisc_sleeping = qdisc;
362 dev->qdisc = &noop_qdisc;
363 }
364
365 qdisc_unlock_tree(dev);
366
367 if (dev->flags & IFF_UP)
368 dev_activate(dev);
369
370 return oqdisc;
371}
372
43effa1e
PM
373void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
374{
375 struct Qdisc_class_ops *cops;
376 unsigned long cl;
377 u32 parentid;
378
379 if (n == 0)
380 return;
381 while ((parentid = sch->parent)) {
0463d4ae 382 sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
43effa1e
PM
383 cops = sch->ops->cl_ops;
384 if (cops->qlen_notify) {
385 cl = cops->get(sch, parentid);
386 cops->qlen_notify(sch, cl);
387 cops->put(sch, cl);
388 }
389 sch->q.qlen -= n;
390 }
391}
392EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
1da177e4
LT
393
394/* Graft qdisc "new" to class "classid" of qdisc "parent" or
395 to device "dev".
396
397 Old qdisc is not destroyed but returned in *old.
398 */
399
400static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
401 u32 classid,
402 struct Qdisc *new, struct Qdisc **old)
403{
404 int err = 0;
405 struct Qdisc *q = *old;
406
407
10297b99 408 if (parent == NULL) {
1da177e4
LT
409 if (q && q->flags&TCQ_F_INGRESS) {
410 *old = dev_graft_qdisc(dev, q);
411 } else {
412 *old = dev_graft_qdisc(dev, new);
413 }
414 } else {
415 struct Qdisc_class_ops *cops = parent->ops->cl_ops;
416
417 err = -EINVAL;
418
419 if (cops) {
420 unsigned long cl = cops->get(parent, classid);
421 if (cl) {
422 err = cops->graft(parent, cl, new, old);
423 if (new)
424 new->parent = classid;
425 cops->put(parent, cl);
426 }
427 }
428 }
429 return err;
430}
431
432/*
433 Allocate and initialize new qdisc.
434
435 Parameters are passed via opt.
436 */
437
438static struct Qdisc *
439qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
440{
441 int err;
442 struct rtattr *kind = tca[TCA_KIND-1];
1da177e4
LT
443 struct Qdisc *sch;
444 struct Qdisc_ops *ops;
1da177e4
LT
445
446 ops = qdisc_lookup_ops(kind);
447#ifdef CONFIG_KMOD
448 if (ops == NULL && kind != NULL) {
449 char name[IFNAMSIZ];
450 if (rtattr_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
451 /* We dropped the RTNL semaphore in order to
452 * perform the module load. So, even if we
453 * succeeded in loading the module we have to
454 * tell the caller to replay the request. We
455 * indicate this using -EAGAIN.
456 * We replay the request because the device may
457 * go away in the mean time.
458 */
459 rtnl_unlock();
460 request_module("sch_%s", name);
461 rtnl_lock();
462 ops = qdisc_lookup_ops(kind);
463 if (ops != NULL) {
464 /* We will try again qdisc_lookup_ops,
465 * so don't keep a reference.
466 */
467 module_put(ops->owner);
468 err = -EAGAIN;
469 goto err_out;
470 }
471 }
472 }
473#endif
474
b9e2cc0f 475 err = -ENOENT;
1da177e4
LT
476 if (ops == NULL)
477 goto err_out;
478
3d54b82f
TG
479 sch = qdisc_alloc(dev, ops);
480 if (IS_ERR(sch)) {
481 err = PTR_ERR(sch);
1da177e4 482 goto err_out2;
3d54b82f 483 }
1da177e4 484
3d54b82f 485 if (handle == TC_H_INGRESS) {
1da177e4 486 sch->flags |= TCQ_F_INGRESS;
fd44de7c 487 sch->stats_lock = &dev->ingress_lock;
3d54b82f 488 handle = TC_H_MAKE(TC_H_INGRESS, 0);
fd44de7c
PM
489 } else {
490 sch->stats_lock = &dev->queue_lock;
491 if (handle == 0) {
492 handle = qdisc_alloc_handle(dev);
493 err = -ENOMEM;
494 if (handle == 0)
495 goto err_out3;
496 }
1da177e4
LT
497 }
498
3d54b82f 499 sch->handle = handle;
1da177e4
LT
500
501 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
023e09a7
TG
502 if (tca[TCA_RATE-1]) {
503 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
504 sch->stats_lock,
505 tca[TCA_RATE-1]);
506 if (err) {
507 /*
508 * Any broken qdiscs that would require
509 * a ops->reset() here? The qdisc was never
510 * in action so it shouldn't be necessary.
511 */
512 if (ops->destroy)
513 ops->destroy(sch);
514 goto err_out3;
515 }
516 }
1da177e4
LT
517 qdisc_lock_tree(dev);
518 list_add_tail(&sch->list, &dev->qdisc_list);
519 qdisc_unlock_tree(dev);
520
1da177e4
LT
521 return sch;
522 }
523err_out3:
524 dev_put(dev);
3d54b82f 525 kfree((char *) sch - sch->padded);
1da177e4
LT
526err_out2:
527 module_put(ops->owner);
528err_out:
529 *errp = err;
1da177e4
LT
530 return NULL;
531}
532
533static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
534{
535 if (tca[TCA_OPTIONS-1]) {
536 int err;
537
538 if (sch->ops->change == NULL)
539 return -EINVAL;
540 err = sch->ops->change(sch, tca[TCA_OPTIONS-1]);
541 if (err)
542 return err;
543 }
1da177e4
LT
544 if (tca[TCA_RATE-1])
545 gen_replace_estimator(&sch->bstats, &sch->rate_est,
546 sch->stats_lock, tca[TCA_RATE-1]);
1da177e4
LT
547 return 0;
548}
549
550struct check_loop_arg
551{
552 struct qdisc_walker w;
553 struct Qdisc *p;
554 int depth;
555};
556
557static int check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w);
558
559static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
560{
561 struct check_loop_arg arg;
562
563 if (q->ops->cl_ops == NULL)
564 return 0;
565
566 arg.w.stop = arg.w.skip = arg.w.count = 0;
567 arg.w.fn = check_loop_fn;
568 arg.depth = depth;
569 arg.p = p;
570 q->ops->cl_ops->walk(q, &arg.w);
571 return arg.w.stop ? -ELOOP : 0;
572}
573
574static int
575check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
576{
577 struct Qdisc *leaf;
578 struct Qdisc_class_ops *cops = q->ops->cl_ops;
579 struct check_loop_arg *arg = (struct check_loop_arg *)w;
580
581 leaf = cops->leaf(q, cl);
582 if (leaf) {
583 if (leaf == arg->p || arg->depth > 7)
584 return -ELOOP;
585 return check_loop(leaf, arg->p, arg->depth + 1);
586 }
587 return 0;
588}
589
590/*
591 * Delete/get qdisc.
592 */
593
594static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
595{
596 struct tcmsg *tcm = NLMSG_DATA(n);
597 struct rtattr **tca = arg;
598 struct net_device *dev;
599 u32 clid = tcm->tcm_parent;
600 struct Qdisc *q = NULL;
601 struct Qdisc *p = NULL;
602 int err;
603
604 if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
605 return -ENODEV;
606
607 if (clid) {
608 if (clid != TC_H_ROOT) {
609 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
610 if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
611 return -ENOENT;
612 q = qdisc_leaf(p, clid);
613 } else { /* ingress */
614 q = dev->qdisc_ingress;
10297b99 615 }
1da177e4
LT
616 } else {
617 q = dev->qdisc_sleeping;
618 }
619 if (!q)
620 return -ENOENT;
621
622 if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
623 return -EINVAL;
624 } else {
625 if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
626 return -ENOENT;
627 }
628
629 if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
630 return -EINVAL;
631
632 if (n->nlmsg_type == RTM_DELQDISC) {
633 if (!clid)
634 return -EINVAL;
635 if (q->handle == 0)
636 return -ENOENT;
637 if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
638 return err;
639 if (q) {
640 qdisc_notify(skb, n, clid, q, NULL);
fd44de7c 641 qdisc_lock_tree(dev);
1da177e4 642 qdisc_destroy(q);
fd44de7c 643 qdisc_unlock_tree(dev);
1da177e4
LT
644 }
645 } else {
646 qdisc_notify(skb, n, clid, NULL, q);
647 }
648 return 0;
649}
650
651/*
652 Create/change qdisc.
653 */
654
655static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
656{
657 struct tcmsg *tcm;
658 struct rtattr **tca;
659 struct net_device *dev;
660 u32 clid;
661 struct Qdisc *q, *p;
662 int err;
663
664replay:
665 /* Reinit, just in case something touches this. */
666 tcm = NLMSG_DATA(n);
667 tca = arg;
668 clid = tcm->tcm_parent;
669 q = p = NULL;
670
671 if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
672 return -ENODEV;
673
674 if (clid) {
675 if (clid != TC_H_ROOT) {
676 if (clid != TC_H_INGRESS) {
677 if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
678 return -ENOENT;
679 q = qdisc_leaf(p, clid);
680 } else { /*ingress */
681 q = dev->qdisc_ingress;
682 }
683 } else {
684 q = dev->qdisc_sleeping;
685 }
686
687 /* It may be default qdisc, ignore it */
688 if (q && q->handle == 0)
689 q = NULL;
690
691 if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
692 if (tcm->tcm_handle) {
693 if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
694 return -EEXIST;
695 if (TC_H_MIN(tcm->tcm_handle))
696 return -EINVAL;
697 if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
698 goto create_n_graft;
699 if (n->nlmsg_flags&NLM_F_EXCL)
700 return -EEXIST;
701 if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
702 return -EINVAL;
703 if (q == p ||
704 (p && check_loop(q, p, 0)))
705 return -ELOOP;
706 atomic_inc(&q->refcnt);
707 goto graft;
708 } else {
709 if (q == NULL)
710 goto create_n_graft;
711
712 /* This magic test requires explanation.
713 *
714 * We know, that some child q is already
715 * attached to this parent and have choice:
716 * either to change it or to create/graft new one.
717 *
718 * 1. We are allowed to create/graft only
719 * if CREATE and REPLACE flags are set.
720 *
721 * 2. If EXCL is set, requestor wanted to say,
722 * that qdisc tcm_handle is not expected
723 * to exist, so that we choose create/graft too.
724 *
725 * 3. The last case is when no flags are set.
726 * Alas, it is sort of hole in API, we
727 * cannot decide what to do unambiguously.
728 * For now we select create/graft, if
729 * user gave KIND, which does not match existing.
730 */
731 if ((n->nlmsg_flags&NLM_F_CREATE) &&
732 (n->nlmsg_flags&NLM_F_REPLACE) &&
733 ((n->nlmsg_flags&NLM_F_EXCL) ||
734 (tca[TCA_KIND-1] &&
735 rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))))
736 goto create_n_graft;
737 }
738 }
739 } else {
740 if (!tcm->tcm_handle)
741 return -EINVAL;
742 q = qdisc_lookup(dev, tcm->tcm_handle);
743 }
744
745 /* Change qdisc parameters */
746 if (q == NULL)
747 return -ENOENT;
748 if (n->nlmsg_flags&NLM_F_EXCL)
749 return -EEXIST;
750 if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], q->ops->id))
751 return -EINVAL;
752 err = qdisc_change(q, tca);
753 if (err == 0)
754 qdisc_notify(skb, n, clid, NULL, q);
755 return err;
756
757create_n_graft:
758 if (!(n->nlmsg_flags&NLM_F_CREATE))
759 return -ENOENT;
760 if (clid == TC_H_INGRESS)
761 q = qdisc_create(dev, tcm->tcm_parent, tca, &err);
10297b99 762 else
1da177e4
LT
763 q = qdisc_create(dev, tcm->tcm_handle, tca, &err);
764 if (q == NULL) {
765 if (err == -EAGAIN)
766 goto replay;
767 return err;
768 }
769
770graft:
771 if (1) {
772 struct Qdisc *old_q = NULL;
773 err = qdisc_graft(dev, p, clid, q, &old_q);
774 if (err) {
775 if (q) {
fd44de7c 776 qdisc_lock_tree(dev);
1da177e4 777 qdisc_destroy(q);
fd44de7c 778 qdisc_unlock_tree(dev);
1da177e4
LT
779 }
780 return err;
781 }
782 qdisc_notify(skb, n, clid, old_q, q);
783 if (old_q) {
fd44de7c 784 qdisc_lock_tree(dev);
1da177e4 785 qdisc_destroy(old_q);
fd44de7c 786 qdisc_unlock_tree(dev);
1da177e4
LT
787 }
788 }
789 return 0;
790}
791
792static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
e431b8c0 793 u32 pid, u32 seq, u16 flags, int event)
1da177e4
LT
794{
795 struct tcmsg *tcm;
796 struct nlmsghdr *nlh;
27a884dc 797 unsigned char *b = skb_tail_pointer(skb);
1da177e4
LT
798 struct gnet_dump d;
799
e431b8c0 800 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1da177e4
LT
801 tcm = NLMSG_DATA(nlh);
802 tcm->tcm_family = AF_UNSPEC;
9ef1d4c7
PM
803 tcm->tcm__pad1 = 0;
804 tcm->tcm__pad2 = 0;
1da177e4
LT
805 tcm->tcm_ifindex = q->dev->ifindex;
806 tcm->tcm_parent = clid;
807 tcm->tcm_handle = q->handle;
808 tcm->tcm_info = atomic_read(&q->refcnt);
809 RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
810 if (q->ops->dump && q->ops->dump(q, skb) < 0)
811 goto rtattr_failure;
812 q->qstats.qlen = q->q.qlen;
813
814 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
815 TCA_XSTATS, q->stats_lock, &d) < 0)
816 goto rtattr_failure;
817
818 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
819 goto rtattr_failure;
820
821 if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
1da177e4 822 gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
1da177e4
LT
823 gnet_stats_copy_queue(&d, &q->qstats) < 0)
824 goto rtattr_failure;
10297b99 825
1da177e4
LT
826 if (gnet_stats_finish_copy(&d) < 0)
827 goto rtattr_failure;
10297b99 828
27a884dc 829 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1da177e4
LT
830 return skb->len;
831
832nlmsg_failure:
833rtattr_failure:
dc5fc579 834 nlmsg_trim(skb, b);
1da177e4
LT
835 return -1;
836}
837
838static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
839 u32 clid, struct Qdisc *old, struct Qdisc *new)
840{
841 struct sk_buff *skb;
842 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
843
844 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
845 if (!skb)
846 return -ENOBUFS;
847
848 if (old && old->handle) {
849 if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
850 goto err_out;
851 }
852 if (new) {
853 if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
854 goto err_out;
855 }
856
857 if (skb->len)
ac6d439d 858 return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1da177e4
LT
859
860err_out:
861 kfree_skb(skb);
862 return -EINVAL;
863}
864
865static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
866{
867 int idx, q_idx;
868 int s_idx, s_q_idx;
869 struct net_device *dev;
870 struct Qdisc *q;
871
872 s_idx = cb->args[0];
873 s_q_idx = q_idx = cb->args[1];
874 read_lock(&dev_base_lock);
7562f876
PE
875 idx = 0;
876 for_each_netdev(dev) {
1da177e4 877 if (idx < s_idx)
7562f876 878 goto cont;
1da177e4
LT
879 if (idx > s_idx)
880 s_q_idx = 0;
1da177e4
LT
881 q_idx = 0;
882 list_for_each_entry(q, &dev->qdisc_list, list) {
883 if (q_idx < s_q_idx) {
884 q_idx++;
885 continue;
886 }
887 if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
0463d4ae 888 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
1da177e4 889 goto done;
1da177e4
LT
890 q_idx++;
891 }
7562f876
PE
892cont:
893 idx++;
1da177e4
LT
894 }
895
896done:
897 read_unlock(&dev_base_lock);
898
899 cb->args[0] = idx;
900 cb->args[1] = q_idx;
901
902 return skb->len;
903}
904
905
906
907/************************************************
908 * Traffic classes manipulation. *
909 ************************************************/
910
911
912
913static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
914{
915 struct tcmsg *tcm = NLMSG_DATA(n);
916 struct rtattr **tca = arg;
917 struct net_device *dev;
918 struct Qdisc *q = NULL;
919 struct Qdisc_class_ops *cops;
920 unsigned long cl = 0;
921 unsigned long new_cl;
922 u32 pid = tcm->tcm_parent;
923 u32 clid = tcm->tcm_handle;
924 u32 qid = TC_H_MAJ(clid);
925 int err;
926
927 if ((dev = __dev_get_by_index(tcm->tcm_ifindex)) == NULL)
928 return -ENODEV;
929
930 /*
931 parent == TC_H_UNSPEC - unspecified parent.
932 parent == TC_H_ROOT - class is root, which has no parent.
933 parent == X:0 - parent is root class.
934 parent == X:Y - parent is a node in hierarchy.
935 parent == 0:Y - parent is X:Y, where X:0 is qdisc.
936
937 handle == 0:0 - generate handle from kernel pool.
938 handle == 0:Y - class is X:Y, where X:0 is qdisc.
939 handle == X:Y - clear.
940 handle == X:0 - root class.
941 */
942
943 /* Step 1. Determine qdisc handle X:0 */
944
945 if (pid != TC_H_ROOT) {
946 u32 qid1 = TC_H_MAJ(pid);
947
948 if (qid && qid1) {
949 /* If both majors are known, they must be identical. */
950 if (qid != qid1)
951 return -EINVAL;
952 } else if (qid1) {
953 qid = qid1;
954 } else if (qid == 0)
955 qid = dev->qdisc_sleeping->handle;
956
957 /* Now qid is genuine qdisc handle consistent
958 both with parent and child.
959
960 TC_H_MAJ(pid) still may be unspecified, complete it now.
961 */
962 if (pid)
963 pid = TC_H_MAKE(qid, pid);
964 } else {
965 if (qid == 0)
966 qid = dev->qdisc_sleeping->handle;
967 }
968
969 /* OK. Locate qdisc */
10297b99 970 if ((q = qdisc_lookup(dev, qid)) == NULL)
1da177e4
LT
971 return -ENOENT;
972
973 /* An check that it supports classes */
974 cops = q->ops->cl_ops;
975 if (cops == NULL)
976 return -EINVAL;
977
978 /* Now try to get class */
979 if (clid == 0) {
980 if (pid == TC_H_ROOT)
981 clid = qid;
982 } else
983 clid = TC_H_MAKE(qid, clid);
984
985 if (clid)
986 cl = cops->get(q, clid);
987
988 if (cl == 0) {
989 err = -ENOENT;
990 if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
991 goto out;
992 } else {
993 switch (n->nlmsg_type) {
10297b99 994 case RTM_NEWTCLASS:
1da177e4
LT
995 err = -EEXIST;
996 if (n->nlmsg_flags&NLM_F_EXCL)
997 goto out;
998 break;
999 case RTM_DELTCLASS:
1000 err = cops->delete(q, cl);
1001 if (err == 0)
1002 tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
1003 goto out;
1004 case RTM_GETTCLASS:
1005 err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS);
1006 goto out;
1007 default:
1008 err = -EINVAL;
1009 goto out;
1010 }
1011 }
1012
1013 new_cl = cl;
1014 err = cops->change(q, clid, pid, tca, &new_cl);
1015 if (err == 0)
1016 tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);
1017
1018out:
1019 if (cl)
1020 cops->put(q, cl);
1021
1022 return err;
1023}
1024
1025
1026static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1027 unsigned long cl,
e431b8c0 1028 u32 pid, u32 seq, u16 flags, int event)
1da177e4
LT
1029{
1030 struct tcmsg *tcm;
1031 struct nlmsghdr *nlh;
27a884dc 1032 unsigned char *b = skb_tail_pointer(skb);
1da177e4
LT
1033 struct gnet_dump d;
1034 struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
1035
e431b8c0 1036 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
1da177e4
LT
1037 tcm = NLMSG_DATA(nlh);
1038 tcm->tcm_family = AF_UNSPEC;
1039 tcm->tcm_ifindex = q->dev->ifindex;
1040 tcm->tcm_parent = q->handle;
1041 tcm->tcm_handle = q->handle;
1042 tcm->tcm_info = 0;
1043 RTA_PUT(skb, TCA_KIND, IFNAMSIZ, q->ops->id);
1044 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1045 goto rtattr_failure;
1046
1047 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
1048 TCA_XSTATS, q->stats_lock, &d) < 0)
1049 goto rtattr_failure;
1050
1051 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
1052 goto rtattr_failure;
1053
1054 if (gnet_stats_finish_copy(&d) < 0)
1055 goto rtattr_failure;
1056
27a884dc 1057 nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1da177e4
LT
1058 return skb->len;
1059
1060nlmsg_failure:
1061rtattr_failure:
dc5fc579 1062 nlmsg_trim(skb, b);
1da177e4
LT
1063 return -1;
1064}
1065
1066static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1067 struct Qdisc *q, unsigned long cl, int event)
1068{
1069 struct sk_buff *skb;
1070 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
1071
1072 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1073 if (!skb)
1074 return -ENOBUFS;
1075
1076 if (tc_fill_tclass(skb, q, cl, pid, n->nlmsg_seq, 0, event) < 0) {
1077 kfree_skb(skb);
1078 return -EINVAL;
1079 }
1080
ac6d439d 1081 return rtnetlink_send(skb, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1da177e4
LT
1082}
1083
1084struct qdisc_dump_args
1085{
1086 struct qdisc_walker w;
1087 struct sk_buff *skb;
1088 struct netlink_callback *cb;
1089};
1090
1091static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
1092{
1093 struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;
1094
1095 return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).pid,
1096 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
1097}
1098
1099static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1100{
1101 int t;
1102 int s_t;
1103 struct net_device *dev;
1104 struct Qdisc *q;
1105 struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
1106 struct qdisc_dump_args arg;
1107
1108 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
1109 return 0;
1110 if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
1111 return 0;
1112
1113 s_t = cb->args[0];
1114 t = 0;
1115
1da177e4
LT
1116 list_for_each_entry(q, &dev->qdisc_list, list) {
1117 if (t < s_t || !q->ops->cl_ops ||
1118 (tcm->tcm_parent &&
1119 TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
1120 t++;
1121 continue;
1122 }
1123 if (t > s_t)
1124 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
1125 arg.w.fn = qdisc_class_dump;
1126 arg.skb = skb;
1127 arg.cb = cb;
1128 arg.w.stop = 0;
1129 arg.w.skip = cb->args[1];
1130 arg.w.count = 0;
1131 q->ops->cl_ops->walk(q, &arg.w);
1132 cb->args[1] = arg.w.count;
1133 if (arg.w.stop)
1134 break;
1135 t++;
1136 }
1da177e4
LT
1137
1138 cb->args[0] = t;
1139
1140 dev_put(dev);
1141 return skb->len;
1142}
1143
1144/* Main classifier routine: scans classifier chain attached
1145 to this qdisc, (optionally) tests for protocol and asks
1146 specific classifiers.
1147 */
73ca4918
PM
1148int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
1149 struct tcf_result *res)
1150{
1151 __be16 protocol = skb->protocol;
1152 int err = 0;
1153
1154 for (; tp; tp = tp->next) {
1155 if ((tp->protocol == protocol ||
1156 tp->protocol == htons(ETH_P_ALL)) &&
1157 (err = tp->classify(skb, tp, res)) >= 0) {
1158#ifdef CONFIG_NET_CLS_ACT
1159 if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
1160 skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
1161#endif
1162 return err;
1163 }
1164 }
1165 return -1;
1166}
1167EXPORT_SYMBOL(tc_classify_compat);
1168
1da177e4 1169int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
73ca4918 1170 struct tcf_result *res)
1da177e4
LT
1171{
1172 int err = 0;
73ca4918 1173 __be16 protocol;
1da177e4
LT
1174#ifdef CONFIG_NET_CLS_ACT
1175 struct tcf_proto *otp = tp;
1176reclassify:
1177#endif
1178 protocol = skb->protocol;
1179
73ca4918 1180 err = tc_classify_compat(skb, tp, res);
1da177e4 1181#ifdef CONFIG_NET_CLS_ACT
73ca4918
PM
1182 if (err == TC_ACT_RECLASSIFY) {
1183 u32 verd = G_TC_VERD(skb->tc_verd);
1184 tp = otp;
1185
1186 if (verd++ >= MAX_REC_LOOP) {
1187 printk("rule prio %u protocol %02x reclassify loop, "
1188 "packet dropped\n",
1189 tp->prio&0xffff, ntohs(tp->protocol));
1190 return TC_ACT_SHOT;
1da177e4 1191 }
73ca4918
PM
1192 skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
1193 goto reclassify;
1da177e4 1194 }
73ca4918
PM
1195#endif
1196 return err;
1da177e4 1197}
73ca4918 1198EXPORT_SYMBOL(tc_classify);
1da177e4 1199
a48b5a61
PM
1200void tcf_destroy(struct tcf_proto *tp)
1201{
1202 tp->ops->destroy(tp);
1203 module_put(tp->ops->owner);
1204 kfree(tp);
1205}
1206
1207void tcf_destroy_chain(struct tcf_proto *fl)
1208{
1209 struct tcf_proto *tp;
1210
1211 while ((tp = fl) != NULL) {
1212 fl = tp->next;
1213 tcf_destroy(tp);
1214 }
1215}
1216EXPORT_SYMBOL(tcf_destroy_chain);
1217
1da177e4
LT
1218#ifdef CONFIG_PROC_FS
1219static int psched_show(struct seq_file *seq, void *v)
1220{
1221 seq_printf(seq, "%08x %08x %08x %08x\n",
641b9e0e 1222 (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1),
514bca32
PM
1223 1000000,
1224 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(KTIME_MONOTONIC_RES));
1da177e4
LT
1225
1226 return 0;
1227}
1228
1229static int psched_open(struct inode *inode, struct file *file)
1230{
1231 return single_open(file, psched_show, PDE(inode)->data);
1232}
1233
da7071d7 1234static const struct file_operations psched_fops = {
1da177e4
LT
1235 .owner = THIS_MODULE,
1236 .open = psched_open,
1237 .read = seq_read,
1238 .llseek = seq_lseek,
1239 .release = single_release,
10297b99 1240};
1da177e4
LT
1241#endif
1242
1da177e4
LT
1243static int __init pktsched_init(void)
1244{
1da177e4
LT
1245 register_qdisc(&pfifo_qdisc_ops);
1246 register_qdisc(&bfifo_qdisc_ops);
1247 proc_net_fops_create("psched", 0, &psched_fops);
1248
be577ddc
TG
1249 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
1250 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
1251 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc);
1252 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL);
1253 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL);
1254 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass);
1255
1da177e4
LT
1256 return 0;
1257}
1258
1259subsys_initcall(pktsched_init);
1260
1261EXPORT_SYMBOL(qdisc_get_rtab);
1262EXPORT_SYMBOL(qdisc_put_rtab);
1263EXPORT_SYMBOL(register_qdisc);
1264EXPORT_SYMBOL(unregister_qdisc);