net: sched: sch: add extack for init callback
[linux-2.6-block.git] / net/sched/sch_red.c

/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length; it should be chosen to be larger
	than qth_max so that packet bursts can be absorbed.  This
	parameter does not affect the algorithm's behaviour and can be
	chosen arbitrarily high (well, less than RAM size).
	In practice this limit will never be reached if RED is working
	correctly.
 */

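/* A rough sketch of the RED computation, assuming the classic Floyd/
 * Jacobson formulation (the authoritative kernel implementation lives
 * in include/net/red.h): the average queue size is an EWMA,
 *
 *	qavg <- qavg + W * (backlog - qavg),	with W = 2^(-Wlog),
 *
 * and a packet is marked/dropped with probability 0 below qth_min,
 * rising linearly towards max_P between qth_min and qth_max, and 1
 * above qth_max.
 */
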
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

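/* Enqueue: refresh the average queue size from the child's backlog and
 * let the RED state machine decide whether to pass the packet through,
 * ECN-mark it, or drop it before handing it to the bfifo child.
 */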
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

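/* Dequeue from the bfifo child; when the child runs empty, record the
 * start of an idle period so that qavg decays while the link is idle.
 */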
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

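/* Mirror the current RED parameters into hardware via ndo_setup_tc();
 * TCQ_F_OFFLOADED is kept in sync with whether the offload succeeded.
 */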
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};
	int err;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.is_ecn = red_use_ecn(q);
	} else {
		opt.command = TC_RED_DESTROY;
	}

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);

	if (!err && enable)
		sch->flags |= TCQ_F_OFFLOADED;
	else
		sch->flags &= ~TCQ_F_OFFLOADED;

	return err;
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P]	= { .type = NLA_U32 },
};

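/* Parse the TCA_RED_* netlink attributes and apply the new
 * configuration.  The byte limit is enforced by a bfifo child qdisc
 * created here.
 */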
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is a bfifo, never &noop_qdisc; hashing it here
		 * avoids calling qdisc_hash_add() with a NULL child when
		 * ctl->limit is 0.
		 */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	red_offload(sch, true);
	return 0;
}

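/* Adaptive RED: re-run red_adaptative_algo() every 500ms to rescale
 * max_P so that the average queue size stays between qth_min and
 * qth_max.
 */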
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

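/* Set up defaults (noop child, adaptation timer) and defer the actual
 * configuration to red_change().
 */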
static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
	return red_change(sch, opt);
}

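/* For an offloaded instance, ask the driver to refresh bstats/qstats
 * before they are dumped to userspace.
 */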
static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	if (!(sch->flags & TCQ_F_OFFLOADED))
		return 0;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					     &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	err = red_dump_offload_stats(sch, &opt);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

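/* Fold xstats gathered by the hardware into the software counters so
 * userspace sees combined totals.
 */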
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct red_stats hw_stats = {0};
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &hw_stats,
			},
		};
		if (!dev->netdev_ops->ndo_setup_tc(dev,
						   TC_SETUP_QDISC_RED,
						   &hw_stats_request)) {
			st.early += hw_stats.prob_drop + hw_stats.forced_drop;
			st.pdrop += hw_stats.pdrop;
			st.other += hw_stats.other;
			st.marked += hw_stats.prob_mark + hw_stats.forced_mark;
		}
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		= red_graft,
	.leaf		= red_leaf,
	.find		= red_find,
	.walk		= red_walk,
	.dump		= red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.cl_ops		= &red_class_ops,
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.peek		= red_peek,
	.init		= red_init,
	.reset		= red_reset,
	.destroy	= red_destroy,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");
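
/* Example of configuring this qdisc from userspace with iproute2
 * (hypothetical device and values):
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn adaptative bandwidth 10Mbit
 *
 * tc derives Wlog, Plog, Scell_log and the TCA_RED_STAB table from
 * these human-readable parameters before issuing the netlink request.
 */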