// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
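
/* Illustrative only (hypothetical values, not taken from this file): a
 * userspace configuration that respects the constraint above could be
 *
 *	tc qdisc add dev eth0 parent 1:1 handle 10: red \
 *		limit 400000 min 30000 max 90000 avpkt 1000 \
 *		burst 55 ecn adaptive bandwidth 10Mbit
 *
 * Here limit (400000 bytes) sits well above qth_max (90000 bytes) plus
 * the burst allowance, so the hard limit should only be hit if RED is
 * misconfigured or misbehaving.
 */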

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

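/* Enqueue: recompute the EWMA average queue length from the child qdisc's
 * backlog and act on the RED verdict (don't mark, probabilistic mark, or
 * hard mark). Depending on the ECN/HARDDROP/NODROP flags, a marked packet
 * is either CE-marked, dropped early via congestion_drop, or queued as-is;
 * the mark and early-drop outcomes are additionally passed through their
 * respective qevent filter blocks.
 */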
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.prob_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.forced_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	len = qdisc_pkt_len(skb);
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	red_restart(&q->vars);
}

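/* Push the current RED configuration down to the NIC (or tear it down)
 * via ndo_setup_tc(TC_SETUP_QDISC_RED), for drivers that can offload
 * this qdisc.
 */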
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

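/* Common configuration path shared by red_init() and red_change(); both
 * parse the netlink attributes and feed the resulting table here.
 */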
static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;
	u8 *stab;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}

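/* Periodic worker for adaptive mode (TC_RED_ADAPTATIVE): re-runs
 * red_adaptative_algo() roughly every 500 ms (HZ/2) under the root qdisc
 * lock and re-arms itself.
 */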
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	return tcf_qevent_init(&q->qe_mark, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			       tb[TCA_RED_MARK_BLOCK], extack);
}

static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		tc_qdisc_stats_dump(sch, 1, walker);
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");