// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/*	Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by the internal classifier or an external one)
 * into flows.
 * This is a stochastic model (as we use a hash, several flows
 *			       might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * only head drops are performed.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
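
/* Typical userspace setup (illustrative only; see tc-fq_codel(8) for the
 * authoritative syntax). The values shown match the defaults set in
 * fq_codel_init() and codel_params_init() below:
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms memory_limit 32mb ecn
 */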

struct fq_codel_flow {
        struct sk_buff    *head;
        struct sk_buff    *tail;
        struct list_head  flowchain;
        int               deficit;
        struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
        struct tcf_proto __rcu *filter_list; /* optional external classifier */
        struct tcf_block *block;
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32             drop_batch_size;
        u32             memory_limit;
        struct codel_params cparams;
        struct codel_stats cstats;
        u32             memory_usage;
        u32             drop_overmemory;
        u32             drop_overlimit;
        u32             new_flow_count;

        struct list_head new_flows;     /* list of new flows */
        struct list_head old_flows;     /* list of old flows */
};

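/* Map the skb flow hash onto one of flows_cnt buckets: reciprocal_scale()
 * scales the 32-bit hash uniformly into [0, flows_cnt) using a multiply
 * instead of a division.
 */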
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  struct sk_buff *skb)
{
        return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

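/* Returns a 1-based flow index in [1, flows_cnt], or 0 if the packet
 * must be dropped; in the 0 case, *qerr tells the caller how to
 * account for the drop.
 */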
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
        int result;

        if (TC_H_MAJ(skb->priority) == sch->handle &&
            TC_H_MIN(skb->priority) > 0 &&
            TC_H_MIN(skb->priority) <= q->flows_cnt)
                return TC_H_MIN(skb->priority);

        filter = rcu_dereference_bh(q->filter_list);
        if (!filter)
                return fq_codel_hash(q, skb) + 1;

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        result = tcf_classify(skb, NULL, filter, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        fallthrough;
                case TC_ACT_SHOT:
                        return 0;
                }
#endif
                if (TC_H_MIN(res.classid) <= q->flows_cnt)
                        return TC_H_MIN(res.classid);
        }
        return 0;
}

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
        struct sk_buff *skb = flow->head;

        flow->head = skb->next;
        skb_mark_not_on_list(skb);
        return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
                                  struct sk_buff *skb)
{
        if (flow->head == NULL)
                flow->head = skb;
        else
                flow->tail->next = skb;
        flow->tail = skb;
        skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
                                  struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        unsigned int maxbacklog = 0, idx = 0, i, len;
        struct fq_codel_flow *flow;
        unsigned int threshold;
        unsigned int mem = 0;

        /* Queue is full! Find the fat flow and drop packet(s) from it.
         * This might sound expensive, but with 1024 flows, we scan
         * 4KB of memory, and we don't need to handle a complex tree
         * in fast path (packet queue/enqueue) with many cache misses.
         * In stress mode, we'll try to drop 64 packets from the flow,
         * amortizing this linear lookup to one cache line per drop.
         */
        for (i = 0; i < q->flows_cnt; i++) {
                if (q->backlogs[i] > maxbacklog) {
                        maxbacklog = q->backlogs[i];
                        idx = i;
                }
        }

        /* Our goal is to drop half of this fat flow backlog */
        threshold = maxbacklog >> 1;

        flow = &q->flows[idx];
        len = 0;
        i = 0;
        do {
                skb = dequeue_head(flow);
                len += qdisc_pkt_len(skb);
                mem += get_codel_cb(skb)->mem_usage;
                __qdisc_drop(skb, to_free);
        } while (++i < max_packets && len < threshold);

        /* Tell codel to increase its signal strength also */
        flow->cvars.count += i;
        q->backlogs[idx] -= len;
        q->memory_usage -= mem;
        sch->qstats.drops += i;
        sch->qstats.backlog -= len;
        sch->q.qlen -= i;
        return idx;
}

static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                            struct sk_buff **to_free)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int idx, prev_backlog, prev_qlen;
        struct fq_codel_flow *flow;
        int ret;
        unsigned int pkt_len;
        bool memory_limited;

        idx = fq_codel_classify(skb, sch, &ret);
        if (idx == 0) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
        }
        idx--;

        codel_set_enqueue_time(skb);
        flow = &q->flows[idx];
        flow_queue_add(flow, skb);
        q->backlogs[idx] += qdisc_pkt_len(skb);
        qdisc_qstats_backlog_inc(sch, skb);

        if (list_empty(&flow->flowchain)) {
                list_add_tail(&flow->flowchain, &q->new_flows);
                q->new_flow_count++;
                flow->deficit = q->quantum;
        }
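        /* Charge skb->truesize now and remember the amount charged in the
         * skb cb, so dequeue/drop can decrement exactly what was added even
         * if truesize changes while the packet sits in the queue.
         */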
        get_codel_cb(skb)->mem_usage = skb->truesize;
        q->memory_usage += get_codel_cb(skb)->mem_usage;
        memory_limited = q->memory_usage > q->memory_limit;
        if (++sch->q.qlen <= sch->limit && !memory_limited)
                return NET_XMIT_SUCCESS;

        prev_backlog = sch->qstats.backlog;
        prev_qlen = sch->q.qlen;

        /* save this packet length as it might be dropped by fq_codel_drop() */
        pkt_len = qdisc_pkt_len(skb);
        /* fq_codel_drop() is quite expensive, as it performs a linear search
         * in q->backlogs[] to find a fat flow.
         * So instead of dropping a single packet, drop half of its backlog
         * with a 64 packets limit to not add a too big cpu spike here.
         */
        ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

        prev_qlen -= sch->q.qlen;
        prev_backlog -= sch->qstats.backlog;
        q->drop_overlimit += prev_qlen;
        if (memory_limited)
                q->drop_overmemory += prev_qlen;

        /* As we dropped packet(s), better let upper stack know this.
         * If we dropped a packet for this flow, return NET_XMIT_CN,
         * but in this case, our parents won't increase their backlogs.
         */
        if (ret == idx) {
                qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
                                          prev_backlog - pkt_len);
                return NET_XMIT_CN;
        }
        qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
        return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
        struct Qdisc *sch = ctx;
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct fq_codel_flow *flow;
        struct sk_buff *skb = NULL;

        flow = container_of(vars, struct fq_codel_flow, cvars);
        if (flow->head) {
                skb = dequeue_head(flow);
                q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
                q->memory_usage -= get_codel_cb(skb)->mem_usage;
                sch->q.qlen--;
                sch->qstats.backlog -= qdisc_pkt_len(skb);
        }
        return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
        struct Qdisc *sch = ctx;

        kfree_skb(skb);
        qdisc_qstats_drop(sch);
}

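/* Deficit Round Robin dequeue: new_flows is served before old_flows;
 * a flow whose deficit is exhausted gets one more quantum and is
 * rotated to the tail of old_flows.
 */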
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct sk_buff *skb;
        struct fq_codel_flow *flow;
        struct list_head *head;

begin:
        head = &q->new_flows;
        if (list_empty(head)) {
                head = &q->old_flows;
                if (list_empty(head))
                        return NULL;
        }
        flow = list_first_entry(head, struct fq_codel_flow, flowchain);

        if (flow->deficit <= 0) {
                flow->deficit += q->quantum;
                list_move_tail(&flow->flowchain, &q->old_flows);
                goto begin;
        }

        skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
                            &flow->cvars, &q->cstats, qdisc_pkt_len,
                            codel_get_enqueue_time, drop_func, dequeue_func);

        if (!skb) {
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && !list_empty(&q->old_flows))
                        list_move_tail(&flow->flowchain, &q->old_flows);
                else
                        list_del_init(&flow->flowchain);
                goto begin;
        }
        qdisc_bstats_update(sch, skb);
        flow->deficit -= qdisc_pkt_len(skb);
        /* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
         * or HTB crashes. Defer it for next round.
         */
        if (q->cstats.drop_count && sch->q.qlen) {
                qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
                                          q->cstats.drop_len);
                q->cstats.drop_count = 0;
                q->cstats.drop_len = 0;
        }
        return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;

        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        for (i = 0; i < q->flows_cnt; i++) {
                struct fq_codel_flow *flow = q->flows + i;

                fq_codel_flow_purge(flow);
                INIT_LIST_HEAD(&flow->flowchain);
                codel_vars_init(&flow->cvars);
        }
        memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
        q->memory_usage = 0;
}

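/* Netlink attribute policy. TARGET, INTERVAL and CE_THRESHOLD are
 * expressed by userspace in usec; QUANTUM and MEMORY_LIMIT in bytes;
 * LIMIT in packets.
 */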
static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
        [TCA_FQ_CODEL_TARGET]   = { .type = NLA_U32 },
        [TCA_FQ_CODEL_LIMIT]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_INTERVAL] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_ECN]      = { .type = NLA_U32 },
        [TCA_FQ_CODEL_FLOWS]    = { .type = NLA_U32 },
        [TCA_FQ_CODEL_QUANTUM]  = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
        [TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
        [TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
                           struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
        u32 quantum = 0;
        int err;

        err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
                                          fq_codel_policy, NULL);
        if (err < 0)
                return err;
        if (tb[TCA_FQ_CODEL_FLOWS]) {
                if (q->flows)
                        return -EINVAL;
                q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
                if (!q->flows_cnt ||
                    q->flows_cnt > 65536)
                        return -EINVAL;
        }
        if (tb[TCA_FQ_CODEL_QUANTUM]) {
                quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
                if (quantum > FQ_CODEL_QUANTUM_MAX) {
                        NL_SET_ERR_MSG(extack, "Invalid quantum");
                        return -EINVAL;
                }
        }
        sch_tree_lock(sch);

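        /* Times arrive from userspace in usec; codel_time_t units are
         * ns >> CODEL_SHIFT, i.e. about 1.024 usec each.
         */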
        if (tb[TCA_FQ_CODEL_TARGET]) {
                u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

                q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
                u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

                q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
                q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
        if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
                q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);

        if (tb[TCA_FQ_CODEL_INTERVAL]) {
                u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

                q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
        }

        if (tb[TCA_FQ_CODEL_LIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

        if (tb[TCA_FQ_CODEL_ECN])
                q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

        if (quantum)
                q->quantum = quantum;

        if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
                q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

        if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
                q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

        while (sch->q.qlen > sch->limit ||
               q->memory_usage > q->memory_limit) {
                struct sk_buff *skb = fq_codel_dequeue(sch);

                q->cstats.drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                q->cstats.drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
        q->cstats.drop_count = 0;
        q->cstats.drop_len = 0;

        sch_tree_unlock(sch);
        return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
        kvfree(q->backlogs);
        kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
                         struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        int i;
        int err;

        sch->limit = 10*1024;
        q->flows_cnt = 1024;
        q->memory_limit = 32 << 20; /* 32 MBytes */
        q->drop_batch_size = 64;
        q->quantum = psched_mtu(qdisc_dev(sch));
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));

        if (opt) {
                err = fq_codel_change(sch, opt, extack);
                if (err)
                        goto init_failure;
        }

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                goto init_failure;

        if (!q->flows) {
                q->flows = kvcalloc(q->flows_cnt,
                                    sizeof(struct fq_codel_flow),
                                    GFP_KERNEL);
                if (!q->flows) {
                        err = -ENOMEM;
                        goto init_failure;
                }
                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
                if (!q->backlogs) {
                        err = -ENOMEM;
                        goto alloc_failure;
                }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;

                        INIT_LIST_HEAD(&flow->flowchain);
                        codel_vars_init(&flow->cvars);
                }
        }
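        /* TCQ_F_CAN_BYPASS lets the stack hand packets straight to the
         * device when the qdisc is empty; it only makes sense if at least
         * one packet may be queued.
         */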
        if (sch->limit >= 1)
                sch->flags |= TCQ_F_CAN_BYPASS;
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;

alloc_failure:
        kvfree(q->flows);
        q->flows = NULL;
init_failure:
        q->flows_cnt = 0;
        return err;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
                        codel_time_to_us(q->cparams.target)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
                        sch->limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
                        codel_time_to_us(q->cparams.interval)) ||
            nla_put_u32(skb, TCA_FQ_CODEL_ECN,
                        q->cparams.ecn) ||
            nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
                        q->quantum) ||
            nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
                        q->drop_batch_size) ||
            nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
                        q->memory_limit) ||
            nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
                        q->flows_cnt))
                goto nla_put_failure;

        if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
                if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
                                codel_time_to_us(q->cparams.ce_threshold)))
                        goto nla_put_failure;
                if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
                        goto nla_put_failure;
                if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
                        goto nla_put_failure;
        }

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        struct tc_fq_codel_xstats st = {
                .type                           = TCA_FQ_CODEL_XSTATS_QDISC,
        };
        struct list_head *pos;

        st.qdisc_stats.maxpacket = q->cstats.maxpacket;
        st.qdisc_stats.drop_overlimit = q->drop_overlimit;
        st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
        st.qdisc_stats.new_flow_count = q->new_flow_count;
        st.qdisc_stats.ce_mark = q->cstats.ce_mark;
        st.qdisc_stats.memory_usage = q->memory_usage;
        st.qdisc_stats.drop_overmemory = q->drop_overmemory;

        sch_tree_lock(sch);
        list_for_each(pos, &q->new_flows)
                st.qdisc_stats.new_flows_len++;

        list_for_each(pos, &q->old_flows)
                st.qdisc_stats.old_flows_len++;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

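/* fq_codel has no configurable classes: each flow is exposed as a
 * read-only pseudo class so that filters can be bound and per-flow
 * statistics dumped, hence the minimal stubs below.
 */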
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
                              u32 classid)
{
        return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
                                            struct netlink_ext_ack *extack)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                     struct gnet_dump *d)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        u32 idx = cl - 1;
        struct gnet_stats_queue qs = { 0 };
        struct tc_fq_codel_xstats xstats;

        if (idx < q->flows_cnt) {
                const struct fq_codel_flow *flow = &q->flows[idx];
                const struct sk_buff *skb;

                memset(&xstats, 0, sizeof(xstats));
                xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
                xstats.class_stats.deficit = flow->deficit;
                xstats.class_stats.ldelay =
                        codel_time_to_us(flow->cvars.ldelay);
                xstats.class_stats.count = flow->cvars.count;
                xstats.class_stats.lastcount = flow->cvars.lastcount;
                xstats.class_stats.dropping = flow->cvars.dropping;
                if (flow->cvars.dropping) {
                        codel_tdiff_t delta = flow->cvars.drop_next -
                                              codel_get_time();

                        xstats.class_stats.drop_next = (delta >= 0) ?
                                codel_time_to_us(delta) :
                                -codel_time_to_us(-delta);
                }
                if (flow->head) {
                        sch_tree_lock(sch);
                        skb = flow->head;
                        while (skb) {
                                qs.qlen++;
                                skb = skb->next;
                        }
                        sch_tree_unlock(sch);
                }
                qs.backlog = q->backlogs[idx];
                qs.drops = 0;
        }
        if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
                return -1;
        if (idx < q->flows_cnt)
                return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
        return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        struct fq_codel_sched_data *q = qdisc_priv(sch);
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < q->flows_cnt; i++) {
                if (list_empty(&q->flows[i].flowchain)) {
                        arg->count++;
                        continue;
                }
                if (!tc_qdisc_stats_dump(sch, i + 1, arg))
                        break;
        }
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
        .leaf           =       fq_codel_leaf,
        .find           =       fq_codel_find,
        .tcf_block      =       fq_codel_tcf_block,
        .bind_tcf       =       fq_codel_bind,
        .unbind_tcf     =       fq_codel_unbind,
        .dump           =       fq_codel_dump_class,
        .dump_stats     =       fq_codel_dump_class_stats,
        .walk           =       fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
        .cl_ops         =       &fq_codel_class_ops,
        .id             =       "fq_codel",
        .priv_size      =       sizeof(struct fq_codel_sched_data),
        .enqueue        =       fq_codel_enqueue,
        .dequeue        =       fq_codel_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_codel_init,
        .reset          =       fq_codel_reset,
        .destroy        =       fq_codel_destroy,
        .change         =       fq_codel_change,
        .dump           =       fq_codel_dump,
        .dump_stats     =       fq_codel_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
        return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
        unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue CoDel discipline");