net: sch: red: Change the name of the stats struct to be generic
net/sched/sch_red.c

/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */

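/*	A minimal illustrative configuration from userspace (iproute2),
	showing how these parameters fit together. The device name and
	all numeric values below are hypothetical examples, chosen so
	that limit comfortably exceeds qth_max plus burst:

	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
		avpkt 1000 burst 55 ecn adaptive bandwidth 10Mbit
 */
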
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

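/* Enqueue: update the EWMA average queue length from the child qdisc's
 * backlog, then let red_action() decide whether to accept the packet,
 * probabilistically mark/drop it, or forcibly mark/drop it. With ECN
 * enabled, packets are CE-marked instead of dropped, unless harddrop
 * is set or the packet is not ECN-capable.
 */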
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

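/* Propagate the RED configuration to (or remove it from) capable
 * hardware via ndo_setup_tc(). The qth_min/qth_max thresholds are
 * scaled back from their Wlog-shifted internal form to byte values.
 */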
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.is_ecn = red_use_ecn(q);
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};

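/* (Re)configure the qdisc from netlink attributes: validate the
 * parameters, create a bfifo child qdisc sized to the hard limit,
 * and swap in the new parameters under the qdisc tree lock.
 */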
static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	red_offload(sch, true);
	return 0;
}

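/* Adaptative RED: every 500 ms, re-tune max_P from the observed average
 * queue length, under the root qdisc's lock.
 */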
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
	return red_change(sch, opt, extack);
}

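/* Refresh bstats/qstats from the offloading driver, if any, and track
 * the current offload state in TCQ_F_OFFLOADED.
 */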
static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					    &hw_stats);
	if (err == -EOPNOTSUPP)
		return 0;

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	err = red_dump_offload_stats(sch, &opt);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

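/* When the qdisc is offloaded, fold the hardware-maintained RED
 * counters into the software xstats so userspace sees combined totals.
 */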
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct red_stats hw_stats = {0};
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &hw_stats,
			},
		};
		if (!dev->netdev_ops->ndo_setup_tc(dev,
						   TC_SETUP_QDISC_RED,
						   &hw_stats_request)) {
			st.early += hw_stats.prob_drop + hw_stats.forced_drop;
			st.pdrop += hw_stats.pdrop;
			st.other += hw_stats.other;
			st.marked += hw_stats.prob_mark + hw_stats.forced_mark;
		}
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		= red_graft,
	.leaf		= red_leaf,
	.find		= red_find,
	.walk		= red_walk,
	.dump		= red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.cl_ops		= &red_class_ops,
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.peek		= red_peek,
	.init		= red_init,
	.reset		= red_reset,
	.destroy	= red_destroy,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");