net_sched: sch_codel: implement lockless codel_dump()
net/sched/sch_codel.c

// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 * Implemented on Linux by:
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>


#define DEFAULT_CODEL_LIMIT 1000

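/* Per-qdisc codel state: the params/vars/stats triplet consumed by the
 * generic codel implementation (codel_dequeue() and friends), plus a
 * local counter of packets dropped because the queue limit was hit at
 * enqueue time.
 */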
struct codel_sched_data {
	struct codel_params	params;
	struct codel_vars	vars;
	struct codel_stats	stats;
	u32			drop_overlimit;
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}
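
/* Drop callback used by codel_dequeue(): free the packet and account
 * the drop in qdisc stats.
 */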
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
					  q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch, to_free);
}
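
/* All codel netlink options are u32; TARGET, INTERVAL and CE_THRESHOLD
 * are expressed in usec by userspace and converted to codel_time_t in
 * codel_change() below.
 */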
static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
};
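
/* Parameter updates run under the qdisc tree lock, but codel_dump()
 * reads them locklessly: each WRITE_ONCE() below pairs with a
 * READ_ONCE() in codel_dump().
 */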
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		WRITE_ONCE(q->params.target,
			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->params.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		WRITE_ONCE(q->params.interval,
			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_CODEL_LIMIT]));

	if (tb[TCA_CODEL_ECN])
		WRITE_ONCE(q->params.ecn,
			   !!nla_get_u32(tb[TCA_CODEL_ECN]));

	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}
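
/* Install defaults, then apply any user-supplied options via
 * codel_change(). TCQ_F_CAN_BYPASS lets an empty qdisc be bypassed
 * on transmit.
 */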
static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}
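
/* Lockless dump: may run concurrently with codel_change(), so every
 * parameter read uses READ_ONCE() instead of taking the qdisc lock.
 */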
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->params.target))) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->params.interval))) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			READ_ONCE(q->params.ecn)))
		goto nla_put_failure;
	ce_threshold = READ_ONCE(q->params.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}
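
/* Export codel's internal state (drop counts, last measured sojourn
 * time, time to next scheduled drop, mark counters) as xstats.
 */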
static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit	= q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
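
/* Drop all queued packets and reset codel's control-law state; the
 * configured parameters are kept.
 */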
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		=	"codel",
	.priv_size	=	sizeof(struct codel_sched_data),

	.enqueue	=	codel_qdisc_enqueue,
	.dequeue	=	codel_qdisc_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	codel_init,
	.reset		=	codel_reset,
	.change		=	codel_change,
	.dump		=	codel_dump,
	.dump_stats	=	codel_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");