// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 *  Many of the algorithms and ideas for this came from
 *  NIST Net which is not copyrighted.
 *
 *  Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliott" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
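
/* Example (illustrative only): the features above map onto tc-netem(8)
 * parameters roughly like
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% \
 *		loss 0.5% duplicate 1% corrupt 0.1% reorder 25% 50%
 *
 * i.e. 100ms +/- 10ms delay (25% correlated jitter), 0.5% random loss,
 * 1% duplication, 0.1% single-bit corruption and correlated reordering.
 */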

struct disttable {
	u32  size;
	s16 table[];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliott models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = get_random_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return get_random_u32();

	value = get_random_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
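
/* The blend above is fixed-point: with U = get_random_u32() and
 * rho' = rho + 1, next = (U * (2^32 - rho') + last * rho') >> 32.
 * E.g. rho close to ~0U makes each output track the previous one
 * almost exactly, while small rho values stay close to pure noise.
 */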

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = get_random_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and whether the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_GAP_PERIOD => isolated losses within a gap period
	 *   LOST_IN_BURST_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_BURST_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		}
		break;
	case LOST_IN_GAP_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
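
/* Transition sketch, read off the code above (a1..a5 carry p13, p31,
 * p32, p14, p23; see struct clgstate):
 *
 *	TX_IN_GAP_PERIOD     --a4-->     LOST_IN_GAP_PERIOD   (loss)
 *	TX_IN_GAP_PERIOD     --a1-->     LOST_IN_BURST_PERIOD (loss)
 *	TX_IN_BURST_PERIOD   --a5-->     LOST_IN_BURST_PERIOD (loss)
 *	LOST_IN_BURST_PERIOD --a3-->     TX_IN_BURST_PERIOD
 *	LOST_IN_BURST_PERIOD --a2-->     TX_IN_GAP_PERIOD
 *	LOST_IN_GAP_PERIOD   --always--> TX_IN_GAP_PERIOD
 */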

/* loss_gilb_ell - Gilbert-Elliott model loss generator
 * Generates losses according to the Gilbert-Elliott loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (get_random_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (get_random_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (get_random_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (get_random_u32() > clg->a3)
			return true;
	}

	return false;
}
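
/* In the good state a packet is lost with probability a4/2^32; in the
 * bad state, with probability 1 - a3/2^32.  a4 == 0 (lossless good
 * state) gives the classical Gilbert model; additionally a3 == 0
 * (every packet lost while bad) gives the Simple Gilbert model.
 */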

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for the GI model):
		 * a loss decision is drawn from the 4-state Markov
		 * generator above.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliott loss model algorithm:
		 * a loss decision is drawn from the Gilbert-Elliott
		 * generator above.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * (u32)sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
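
/* The final expression computes mu + sigma * t / NETEM_DIST_SCALE
 * without overflow by splitting sigma into quotient and remainder
 * modulo NETEM_DIST_SCALE (8192).  Tables are scaled so 8192 is one
 * standard deviation: e.g. mu = 100ms, sigma = 10ms and t = 8192
 * give roughly 110ms.
 */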

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
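
/* q->rate is in bytes per second: e.g. a 1500 byte packet at
 * q->rate = 12500000 (100 Mbit/s) occupies the link for
 * 1500 * 10^9 / 12500000 = 120000 ns.  cell_size/cell_overhead model
 * cell-based links: cell_size = 48 and cell_overhead = 5 approximates
 * ATM, where a 100 byte payload consumes 3 cells of 53 bytes on the wire.
 */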

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}
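
/* Packets whose send time is not before the current tail (the common
 * case when jitter is zero or small) are appended to the linear list
 * in O(1); only genuinely out-of-order send times pay the O(log n)
 * rbtree insertion.
 */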

/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted, and re-enqueue the
 * remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int prev_len = qdisc_pkt_len(skb);
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root_bh(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			skb = netem_segment(skb, sch, to_free);
			if (!skb)
				return rc_drop;
			segs = skb->next;
			skb_mark_not_on_list(skb);
			qdisc_skb_cb(skb)->pkt_len = skb->len;
		}

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			skb = NULL;
			goto finish_segs;
		}

		/* flip one random bit in one random byte of the linear header */
		skb->data[get_random_u32_below(skb_headlen(skb))] ^=
			1<<get_random_u32_below(8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		/* re-link segs, so that qdisc_drop_all() frees them all */
		skb->next = segs;
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		unsigned int len, last_len;
		int nb;

		len = skb ? skb->len : 0;
		nb = skb ? 1 : 0;

		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		/* Parent qdiscs accounted for 1 skb of size @prev_len */
		qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
	} else if (!skb) {
		return NET_XMIT_DROP;
	}
	return NET_XMIT_SUCCESS;
}
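
/* The reordering branch above fires once q->counter reaches the
 * configured gap and the correlated draw stays at or below q->reorder:
 * e.g. "tc ... netem delay 10ms reorder 25% gap 5" sends, on average,
 * every 5th packet immediately with 25% probability while the rest
 * take the delayed tfifo path.
 */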

/* Begin a new slot at some future time, with a fresh budget of
 * bytes and packets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(get_random_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
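
/* Slots emulate links that deliver traffic in bursts (e.g. WiFi
 * aggregation or DOCSIS request/grant cycles): nothing is delivered
 * before slot_next, and each slot carries at most max_packets and
 * max_bytes before the next slot is scheduled.
 */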

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* is more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
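
/* Dequeue order: skbs forced to the head by the reordering branch are
 * delivered first; then the time-ordered tfifo, once both time_to_send
 * and the current slot permit; otherwise the watchdog hrtimer is armed
 * for max(time_to_send, slot_next) so dequeue retries exactly on time.
 */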

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (!n || n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(struct_size(d, table, n), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
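
/* The table is an inverse-CDF sampled so that NETEM_DIST_SCALE (8192)
 * corresponds to one standard deviation; userspace (iproute2) ships
 * precomputed normal, pareto and paretonormal tables and passes them
 * in TCA_NETEM_DELAY_DIST / TCA_NETEM_SLOT_DIST.
 */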

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;

	/* capping dist_jitter to the range acceptable by tabledist() */
	q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));

	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	/* capping jitter to the range acceptable by tabledist() */
	q->jitter = min_t(s64, abs(q->jitter), INT_MAX);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case q->clg and
	 * q->loss_model were modified in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}
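
/* Rates at or above 2^32 bytes/sec (~34 Gbit/s) do not fit the legacy
 * 32-bit tc_netem_rate.rate field, so TCA_NETEM_RATE64 takes
 * precedence above, and netem_dump() reports ~0U in the legacy field
 * as a marker.
 */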

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc) 	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (!tc_qdisc_stats_dump(sch, 1, walker))
			return;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");