/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/* Network Emulation Queuing algorithm.
   ====================================

   Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
	    Network Emulation Tool"
	    [2] Luigi Rizzo, DummyNet for FreeBSD

   ----------------------------------------------------------------

   This started out as a simple way to delay outgoing packets to
   test TCP but has grown to include most of the functionality
   of a full-blown network emulator like NISTnet.  It can delay
   packets and add random jitter (and correlation).  The random
   distribution can also be loaded from a table, to provide
   normal, Pareto, or experimental curves.  Packet loss,
   duplication, and reordering can also be emulated.

   This qdisc does not do classification; that can be handled by
   layering other disciplines.  It does not need to do bandwidth
   control either, since that can be handled by using token
   bucket or other rate control.

   The simulator is limited by the Linux timer resolution
   and will create packet bursts on the HZ boundary (1ms).
*/
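
/* Illustrative configuration (added note, not part of the original source):
 * the parameters below are normally set from user space via the iproute2
 * tc(8) front end, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 *
 * which requests a 100ms mean delay with 10ms jitter at 25% correlation,
 * plus 0.1% random loss; tc encodes this into the netlink attributes
 * parsed by netem_change() below.
 */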

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
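
/* Illustrative user-space model of the update rule above (an added sketch,
 * not kernel code): the generator is a fixed-point exponentially weighted
 * blend of fresh entropy and the previous output:
 *
 *	uint32_t crandom_step(uint32_t *last, uint32_t rho_in)
 *	{
 *		uint64_t rho   = (uint64_t)rho_in + 1;
 *		uint64_t value = (uint32_t)rand();  // stand-in for net_random()
 *		uint32_t answer = (value * ((1ull << 32) - rho) +
 *				   (uint64_t)*last * rho) >> 32;
 *		*last = answer;
 *		return answer;
 *	}
 *
 * rho = 0 degenerates to plain random numbers, while rho = 0x80000000 makes
 * each output roughly the average of the previous output and a fresh draw.
 */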

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
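
/* Added note: the return statement above computes mu + sigma*t/NETEM_DIST_SCALE
 * without overflowing, by splitting sigma into its quotient and remainder
 * modulo NETEM_DIST_SCALE; the +/- NETEM_DIST_SCALE/2 adjustment rounds the
 * remainder term to the nearest integer instead of truncating toward zero.
 */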

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, then re-insert at top of the
	 * qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed, since we are modifying the packet.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

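		/* Corrupt by flipping one randomly chosen bit in one randomly
		 * chosen byte of the linear header area of the (now private)
		 * skb.
		 */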
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
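
/* Added note on the reorder path: after q->gap packets have taken the delayed
 * branch, the next packet is (with probability q->reorder, scaled to u32)
 * stamped with the current time and requeued at the head of the inner tfifo,
 * so it overtakes packets still waiting out their delay.
 */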

/* Requeue packets, but don't change the time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	smp_mb();
	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* is it time to send this packet? */
		if (cb->time_to_send <= now) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		if (unlikely(q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS)) {
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
			printk(KERN_ERR "netem: %s could not requeue\n",
			       q->qdisc->ops->id);
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

	return NULL;
}
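
/* Added note: qdisc_watchdog_schedule() arms an hrtimer for time_to_send and
 * marks the qdisc TCQ_F_THROTTLED; until the watchdog fires and reschedules
 * the qdisc, the early TCQ_F_THROTTLED test above keeps dequeue from busy
 * polling a packet that is not yet due.
 */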

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable-size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_lock(sch);

	spin_lock_bh(root_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(root_lock);

	kfree(d);
	return 0;
}
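
/* Added note: the table is built without the qdisc lock held; xchg() under
 * the root lock then swaps it in atomically, so concurrent enqueues see
 * either the old or the new table, never a partially written one.
 */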

static int get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	ret = nla_parse_nested_compat(tb, TCA_NETEM_MAX, opt, netem_policy,
				      qopt, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, we need to assume 100% probability.
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR]) {
		ret = get_correlation(sch, tb[TCA_NETEM_CORR]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER]) {
		ret = get_reorder(sch, tb[TCA_NETEM_REORDER]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_CORRUPT]) {
		ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
		if (ret)
			return ret;
	}

	return 0;
}
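
/* Added note: the options message is a struct tc_netem_qopt followed by
 * optional nested attributes (TCA_NETEM_CORR, TCA_NETEM_DELAY_DIST,
 * TCA_NETEM_REORDER, TCA_NETEM_CORRUPT), which is why the "compat" parser
 * above extracts qopt and the attribute table in one call.
 */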

/*
 * Special-case version of FIFO queue for use by netem.
 * It queues packets in order, based on the time stamps
 * in their socket buffer control blocks.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
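
/* Added note: tfifo is an insertion sort keyed on time_to_send.  Packets with
 * monotonically increasing send times take the O(1) tail fast path; only
 * reordered packets (stamped with the current time) pay for the reverse walk
 * toward the head.
 */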

static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.requeue	=	netem_requeue,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");