/*
 * Fair Queue CoDel discipline
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>
/*
 * Packets are classified (internal classifier or external) on flows.
 * This is a Stochastic model (as we use a hash, several flows
 * might be hashed to the same slot)
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 * For a given flow, packets are not reordered (CoDel uses a FIFO)
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */
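/*
 * Illustrative note (not part of the original source): assuming the default
 * of 1024 hash buckets, two unrelated flows collide with probability about
 * 1/1024. A collision only means the two flows temporarily share one CoDel
 * queue and one deficit; it does not reorder packets within either flow.
 */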
struct fq_codel_flow {
	struct list_head  flowchain;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */
struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	struct codel_params cparams;
	struct codel_stats cstats;
	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
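/*
 * Illustrative note (not part of the original source): reciprocal_scale(x, n)
 * maps the 32-bit flow hash x into [0, n) as ((u64)x * n) >> 32, avoiding a
 * modulo on the fast path while keeping the bucket distribution uniform.
 */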
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);
	filter = rcu_dereference_bh(q->filter_list);
		return fq_codel_hash(q, skb) + 1;
	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, filter, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
	if (TC_H_MIN(res.classid) <= q->flows_cnt)
		return TC_H_MIN(res.classid);
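	/*
	 * Illustrative note (not part of the original source): flow indices
	 * returned here are 1-based; 0 means "not classified", which is why
	 * the hash fallback above adds 1 to the bucket number.
	 */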
/* helper functions : might be changed when/if skb uses a standard list_head */
/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
	struct sk_buff *skb = flow->head;
	flow->head = skb->next;
/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
	if (flow->head == NULL)
	flow->tail->next = skb;
static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;
	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;
	flow = &q->flows[idx];
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
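/*
 * Illustrative arithmetic (assumes the default 1024 flows and 64-byte cache
 * lines): the backlog scan above touches 1024 * sizeof(u32) = 4KB, i.e. 64
 * cache lines, and up to 64 packets may be dropped per call, which is the
 * "one cache line per drop" amortization mentioned in the comment.
 */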
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	idx = fq_codel_classify(skb, sch, &ret);
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);
	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		flow->deficit = q->quantum;
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;
	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;
	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64 packet limit so as not to add too big a CPU spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
		q->drop_overmemory += prev_qlen;
	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
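/*
 * Illustrative note (not part of the original source): when fq_codel_drop()
 * happens to shoot packets from the very flow we just enqueued to, the
 * caller gets NET_XMIT_CN; otherwise the new packet was queued successfully
 * and NET_XMIT_SUCCESS is returned even though another flow lost packets.
 */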
/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;
	flow = container_of(vars, struct fq_codel_flow, cvars);
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
static void drop_func(struct sk_buff *skb, void *ctx)
	struct Qdisc *sch = ctx;
	qdisc_qstats_drop(sch);
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;
	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);
	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
			list_del_init(&flow->flowchain);
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
static void fq_codel_flow_purge(struct fq_codel_flow *flow)
	rtnl_kfree_skbs(flow->head, flow->tail);
static void fq_codel_reset(struct Qdisc *sch)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;
		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->qstats.backlog = 0;
static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
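/*
 * Illustrative note (assumption about user-space naming, not from this file):
 * these attributes are what tc(8) fills in for the fq_codel parameters
 * "target", "limit", "interval", "ecn"/"noecn", "flows", "quantum",
 * "ce_threshold", "drop_batch" and "memory_limit".
 */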
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy,
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		    q->flows_cnt > 65536)
	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);
		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);
		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);
		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
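	/*
	 * Illustrative note (not part of the original source): the conversions
	 * above turn a u32 in microseconds into codel_time_t units, which are
	 * nanoseconds shifted right by CODEL_SHIFT (10 in include/net/codel.h),
	 * i.e. a granularity of roughly 1.024 us.
	 */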
	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);
		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;
	sch_tree_unlock(sch);
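/*
 * Example configuration (illustrative only; the values shown are the usual
 * defaults, and any omitted parameter keeps what fq_codel_init() set up):
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *      target 5ms interval 100ms ecn
 */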
static void *fq_codel_zalloc(size_t sz)
	void *ptr = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
static void fq_codel_free(void *addr)
static void fq_codel_destroy(struct Qdisc *sch)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	tcf_destroy_chain(&q->filter_list);
	fq_codel_free(q->backlogs);
	fq_codel_free(q->flows);
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	sch->limit = 10*1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));
		int err = fq_codel_change(sch, opt);
		q->flows = fq_codel_zalloc(q->flows_cnt *
					   sizeof(struct fq_codel_flow));
		q->backlogs = fq_codel_zalloc(q->flows_cnt * sizeof(u32));
			fq_codel_free(q->flows);
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;
			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		sch->flags |= TCQ_F_CAN_BYPASS;
		sch->flags &= ~TCQ_F_CAN_BYPASS;
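	/*
	 * Illustrative note (not part of the original source): TCQ_F_CAN_BYPASS
	 * lets the core hand a packet straight to the driver when the qdisc is
	 * empty; fq_codel_bind() clears it again once a classifier is attached,
	 * since bypassed packets would skip classification.
	 */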
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	opts = nla_nest_start(skb, TCA_OPTIONS);
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
		goto nla_put_failure;
	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);
static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type			= TCA_FQ_CODEL_XSTATS_QDISC,
	struct list_head *pos;
	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;
	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);
	return gnet_stats_copy_app(d, &st, sizeof(st));
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
static unsigned long fq_codel_get(struct Qdisc *sch, u32 classid)
static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
static void fq_codel_put(struct Qdisc *q, unsigned long cl)
static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	return &q->filter_list;
static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
	tcm->tcm_handle |= TC_H_MIN(cl);
static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;
	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;
		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		sch_tree_unlock(sch);
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
		if (arg->fn(sch, i + 1, arg) < 0) {
static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.tcf_chain	=	fq_codel_find_tcf,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_put,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
static int __init fq_codel_module_init(void)
	return register_qdisc(&fq_codel_qdisc_ops);
static void __exit fq_codel_module_exit(void)
	unregister_qdisc(&fq_codel_qdisc_ops);
module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");