/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using a token
	bucket or other rate control.
*/

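/*
 * Example usage (illustrative sketch, not part of the original file; the
 * exact option syntax depends on the installed iproute2 version):
 *
 *	# 100ms delay with 10ms jitter, 25% correlation between samples
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# 0.3% loss, 1% duplication, 0.1% single-bit corruption
 *	tc qdisc change dev eth0 root netem loss 0.3% duplicate 1% corrupt 0.1%
 *
 *	# reordering: a packet may periodically jump ahead of delayed ones
 *	tc qdisc change dev eth0 root netem delay 10ms reorder 25% 50% gap 5
 */
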
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss, limit, counter, gap;
	u32 duplicate, reorder, corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

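/*
 * Worked example (added commentary): state->rho is a correlation
 * coefficient stored as a fraction of 2^32.  Each output is the convex
 * combination
 *
 *	answer = ((2^32 - rho) * value + rho * last) / 2^32
 *
 * (rho is bumped by one above so that state->rho == ~0 reproduces the
 * previous value exactly).  With state->rho = 0x80000000 (~0.5), each
 * output is roughly the average of a fresh net_random() sample and the
 * previous output.
 */
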
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

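/*
 * Added commentary: each table entry t is a sample of the target
 * distribution scaled by NETEM_DIST_SCALE, so the result above is
 * conceptually
 *
 *	mu + sigma * t / NETEM_DIST_SCALE
 *
 * The multiply is split into a quotient term (sigma / NETEM_DIST_SCALE) * t
 * plus a rounded remainder term so that sigma * t cannot overflow for
 * large sigma.
 */
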
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, then re-insert it at the top
	 * of the qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();
		cb->time_to_send = now + delay;
		++q->counter;
		ret = qdisc_enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&q->qdisc->q, skb);
		q->qdisc->qstats.backlog += qdisc_pkt_len(skb);
		q->qdisc->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
	} else if (net_xmit_drop_count(ret)) {
		sch->qstats.drops++;
	}

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

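/*
 * Added commentary on the reorder branch above: while q->counter < q->gap,
 * packets are delayed normally and the counter advances.  Once the gap
 * window is full, the next packet is a reorder candidate: with probability
 * q->reorder it is queued at the head with time_to_send set to "now", so
 * it overtakes the still-delayed packets ahead of it, and the counter
 * resets.
 */
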
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* has the head packet's departure time been reached? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}
	return NULL;
}

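/*
 * Added commentary: dequeue never busy-waits.  When the head packet's
 * departure time is still in the future, qdisc_watchdog_schedule() arms
 * an hrtimer for cb->time_to_send and throttles the qdisc; the
 * TCQ_F_THROTTLED check above then short-circuits further dequeue
 * attempts until the timer fires.
 */
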
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	kfree(q->delay_dist);
	q->delay_dist = d;
	spin_unlock_bh(root_lock);
	return 0;
}

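/*
 * Added commentary (describes external tooling, so treat as a sketch): in
 * practice the table comes from the distribution files shipped with
 * iproute2 (e.g. normal, pareto, paretonormal), which tc passes down via
 * the TCA_NETEM_DELAY_DIST attribute.  Each entry is effectively an
 * inverse-CDF sample scaled by NETEM_DIST_SCALE, consumed by tabledist()
 * above.
 */
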
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0)
		return -EINVAL;
	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);
	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

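/*
 * Added commentary: netem's TCA_OPTIONS payload is a legacy layout, a
 * fixed struct tc_netem_qopt optionally followed by netlink attributes
 * such as TCA_NETEM_CORR.  parse_attr() skips the aligned fixed struct
 * and runs the normal nla_parse() over whatever bytes remain.
 */
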
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	ret = fifo_set_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	return 0;
}

/*
 * Special case version of FIFO queue for use by netem.
 * It queues in order based on the timestamps in the skb's control block.
 */
struct fifo_sched_data {
	u32 limit;
	psched_time_t oldest;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* Optimize for add at tail */
		if (likely(skb_queue_empty(list) || tnext >= q->oldest)) {
			q->oldest = tnext;
			return qdisc_enqueue_tail(nskb, sch);
		}

		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb = netem_skb_cb(skb);

			if (tnext >= cb->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += qdisc_pkt_len(nskb);
		sch->bstats.bytes += qdisc_pkt_len(nskb);
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}

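/*
 * Added commentary: the insertion scan walks from the tail because most
 * packets share the same base latency, so a new packet's time_to_send is
 * usually the latest and the right slot is at or near the tail; only
 * jittered or reordered packets force a deeper walk.
 */
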
static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = nla_data(opt);

		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);

	q->oldest = PSCHED_PASTPERFECT;
	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				     &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nla = (struct nlattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	nla->nla_len = skb_tail_pointer(skb) - b;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");