/*
 * Fair Queue CoDel discipline
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a Stochastic model (as we use a hash, several flows
 * might be hashed on same slot)
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority on old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO)
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */

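/* Example configuration from user space (illustrative only; "eth0" is a
 * placeholder interface, and the values shown simply restate the defaults
 * applied in fq_codel_init() below):
 *
 *   tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 ecn
 */
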
struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

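/* Map a packet onto one of the flows_cnt buckets: skb_get_hash() provides a
 * 32-bit flow hash, and reciprocal_scale() projects it onto [0, flows_cnt)
 * using a multiply instead of a modulo. Distinct flows may share a bucket,
 * which is the "stochastic" part of the design described above.
 */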
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

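/* Select the flow for a packet: a return value of 0 means "drop", while
 * 1..flows_cnt selects a flow (the caller subtracts one). skb->priority can
 * address a flow directly, an optional external filter chain is consulted
 * next, and the stochastic hash is the fallback.
 */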
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

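/* Drop up to max_packets packets (or half of the backlog, whichever limit is
 * hit first) from the currently fattest flow. The index of that flow is
 * returned so fq_codel_enqueue() can tell whether the packet it just queued
 * was among the victims.
 */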
static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we dont need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

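/* Enqueue path: classify the packet onto a flow, append it to that flow's
 * FIFO and, if the flow was idle, attach it to the tail of new_flows with a
 * fresh quantum. When the packet limit or the memory limit is exceeded, a
 * batch of packets is dropped from the fattest flow.
 */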
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64 packets limit to not add a too big cpu spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents wont increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we dont need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

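/* Dequeue path: serve new_flows first, then old_flows, in deficit round robin
 * fashion. A flow whose deficit is exhausted gets one more quantum and moves
 * to the tail of old_flows; the actual packet extraction and the AQM decision
 * (drop or ECN mark) are delegated to codel_dequeue().
 */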
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

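/* Netlink parameter update. TARGET, INTERVAL and CE_THRESHOLD are supplied in
 * microseconds and converted to codel time units here; FLOWS can only be set
 * before the flow table has been allocated, i.e. at qdisc creation time.
 */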
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FQ_CODEL_MAX, opt, fq_codel_policy,
			       NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	/* the drop batch must contain at least one packet */
	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

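/* Defaults: 10240 packet limit, 1024 flows, 32 MBytes memory limit, drop
 * batches of 64 packets, quantum of one device MTU and ECN enabled. CoDel's
 * own target/interval defaults come from codel_params_init().
 */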
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt, extack);
		if (err)
			goto init_failure;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	if (!q->flows) {
		q->flows = kvcalloc(q->flows_cnt,
				    sizeof(struct fq_codel_flow),
				    GFP_KERNEL);
		if (!q->flows) {
			err = -ENOMEM;
			goto init_failure;
		}
		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
		if (!q->backlogs) {
			err = -ENOMEM;
			goto alloc_failure;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;

alloc_failure:
	kvfree(q->flows);
	q->flows = NULL;
init_failure:
	q->flows_cnt = 0;
	return err;
}

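/* Report the current configuration back to user space; time parameters are
 * converted back to microseconds, and CE_THRESHOLD is only dumped when it has
 * been enabled.
 */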
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

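/* Class interface: each flow is exposed as a pseudo class numbered
 * 1..flows_cnt so that filters can be attached and per-flow statistics can be
 * dumped; there are no child qdiscs, hence the trivial leaf/find helpers.
 */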
static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.find		=	fq_codel_find,
	.tcf_block	=	fq_codel_tcf_block,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_unbind,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");