// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */

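/* An illustrative userspace configuration (example only; option names and
 * values follow the style of the tc-red(8) man page, they are not taken
 * from this file):
 *
 *	tc qdisc add dev eth0 parent 1:1 handle 10: red \
 *		limit 400000 min 30000 max 90000 avpkt 1000 \
 *		burst 55 ecn adaptive bandwidth 10Mbit
 *
 * tc derives Wlog, Plog, Scell_log and the stab lookup table from these
 * values and passes them down in struct tc_red_qopt (TCA_RED_PARMS).
 */
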
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

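/* Enqueue: the average queue size qavg is an EWMA maintained by
 * red_calc_qavg() (roughly qavg += (backlog - qavg) >> Wlog; see
 * include/net/red.h).  red_action() compares it against the thresholds:
 * RED_DONT_MARK enqueues normally, RED_PROB_MARK marks/drops with growing
 * probability, and RED_HARD_MARK fires once the average exceeds qth_max.
 * With TC_RED_ECN, ECN-capable packets are CE-marked instead of dropped;
 * TC_RED_NODROP additionally queues non-ECT packets instead of dropping
 * them; TC_RED_HARDDROP forces a drop even when ECN is enabled.
 */
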
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_CONGESTED;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.prob_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.forced_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	len = qdisc_pkt_len(skb);
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop_reason(skb, sch, to_free, reason);
	return NET_XMIT_CN;
}

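/* Dequeue pulls from the child fifo.  When the child runs empty, the start
 * of the idle period is recorded so red_calc_qavg() can decay the average
 * for the time the link was idle (cf. the 990814 changelog entry above).
 */
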
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	red_restart(&q->vars);
}

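/* Install or remove the qdisc in hardware via TC_SETUP_QDISC_RED.  The
 * thresholds are stored internally scaled by 2^Wlog, so they are shifted
 * back to plain byte values before being handed to the driver.
 */
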
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	timer_delete_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

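/* Common worker for red_init() and red_change(): validates TCA_RED_PARMS
 * and TCA_RED_STAB, merges the TCA_RED_FLAGS bitfield32 with the historic
 * flags carried in tc_red_qopt.flags, and (re)creates the bfifo child qdisc
 * sized to the configured byte limit.
 */
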
static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;
	u8 *stab;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = nla_get_u32_default(tb[TCA_RED_MAX_P], 0);

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_purge_queue(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	timer_delete(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}

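/* Adaptive RED (TC_RED_ADAPTATIVE): every 500ms red_adaptative_algo()
 * adjusts max_P so that the average queue length stays between qth_min and
 * qth_max.  The timer runs under the qdisc root lock.
 */
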
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = timer_container_of(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

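/* red_init() parses the netlink options, applies them through
 * __red_change(), then binds the optional early_drop and mark qevent
 * blocks, which let classifier chains observe early drops and ECN marks.
 */
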
static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	return tcf_qevent_init(&q->qe_mark, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			       tb[TCA_RED_MARK_BLOCK], extack);
}

static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

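/* Dump the active configuration back to userspace.  qth_min/qth_max are
 * shifted right by Wlog to undo the internal average-queue scaling, and the
 * flags are reported both in the historic tc_red_qopt.flags field and as a
 * TCA_RED_FLAGS bitfield32.
 */
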
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop)
		tc_qdisc_stats_dump(sch, 1, walker);
}

20fea08b 530static const struct Qdisc_class_ops red_class_ops = {
f38c39d6
PM
531 .graft = red_graft,
532 .leaf = red_leaf,
143976ce 533 .find = red_find,
f38c39d6 534 .walk = red_walk,
f38c39d6
PM
535 .dump = red_dump_class,
536};
537
20fea08b 538static struct Qdisc_ops red_qdisc_ops __read_mostly = {
1da177e4
LT
539 .id = "red",
540 .priv_size = sizeof(struct red_sched_data),
f38c39d6 541 .cl_ops = &red_class_ops,
1da177e4
LT
542 .enqueue = red_enqueue,
543 .dequeue = red_dequeue,
8e3af978 544 .peek = red_peek,
1da177e4
LT
545 .init = red_init,
546 .reset = red_reset,
f38c39d6 547 .destroy = red_destroy,
1da177e4
LT
548 .change = red_change,
549 .dump = red_dump,
550 .dump_stats = red_dump_stats,
551 .owner = THIS_MODULE,
552};
241a94ab 553MODULE_ALIAS_NET_SCH("red");
1da177e4
LT
554
555static int __init red_module_init(void)
556{
557 return register_qdisc(&red_qdisc_ops);
558}
dba051f3
TG
559
560static void __exit red_module_exit(void)
1da177e4
LT
561{
562 unregister_qdisc(&red_qdisc_ops);
563}
dba051f3 564
1da177e4
LT
565module_init(red_module_init)
566module_exit(red_module_exit)
dba051f3 567
1da177e4 568MODULE_LICENSE("GPL");
f96118c5 569MODULE_DESCRIPTION("Random Early Detection qdisc");