/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/* Stochastic Fairness Queuing algorithm.
   =======================================

   Source:
   Paul E. McKenney "Stochastic Fairness Queuing",
   IEEE INFOCOM'90 Proceedings, San Francisco, 1990.

   Paul E. McKenney "Stochastic Fairness Queuing",
   "Interworking: Research and Experience", v.2, 1991, p.113-131.


   See also:
   M. Shreedhar and George Varghese "Efficient Fair
   Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


   This is not the thing that is usually called (W)FQ nowadays.
   It does not use any timestamp mechanism, but instead
   processes queues in round-robin order.

   ADVANTAGE:

   - It is very cheap. Both CPU and memory requirements are minimal.

   DRAWBACKS:

   - "Stochastic" -> It is not 100% fair.
   When hash collisions occur, several flows are considered as one.

   - "Round-robin" -> It introduces larger delays than virtual clock
   based schemes, and should not be used for isolating interactive
   traffic from non-interactive. This means that this scheduler
   should be used as a leaf of CBQ or P3, which put interactive traffic
   into a higher priority band.

   We still need true WFQ for top level CSZ, but using WFQ
   for the best effort traffic is absolutely pointless:
   SFQ is superior for this purpose.

   IMPLEMENTATION:
   This implementation limits maximal queue length to 128;
   max mtu to 2^18-1; max 128 flows, number of hash buckets to 1024.
   The only goal of these restrictions was that all data
   fit into one 4K page on 32-bit arches.

   It is easy to increase these values, but not in flight.  */

#define SFQ_DEPTH		128 /* max number of packets per flow */
#define SFQ_SLOTS		128 /* max number of flows */
#define SFQ_EMPTY_SLOT		255
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
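
/*
 * Worked example of the scaling: a maximal 65535 byte packet has a
 * scaled cost of SFQ_ALLOT_SIZE(65535) = DIV_ROUND_UP(65535, 8) = 8192,
 * which fits comfortably in the signed 16-bit 'allot' field.
 */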

/* This type should contain at least SFQ_DEPTH + SFQ_SLOTS values */
typedef unsigned char sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_SLOTS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq chain */
	struct sfq_head	dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */
};

struct sfq_sched_data {
/* Parameters */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;
	unsigned int	divisor;	/* number of slots in hash table */
/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short	scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot	*tail;		/* current slot in round */
	sfq_index	*ht;		/* Hash table (divisor slots) */
	struct sfq_slot	slots[SFQ_SLOTS];
	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_SLOTS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_SLOTS];
}

static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
}

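/*
 * sfq_hash() below feeds addresses, the protocol and, when the headers
 * can be pulled, the port words into jhash_2words(), keyed by
 * q->perturbation. The '& (q->divisor - 1)' fold above relies on the
 * divisor being a power of two, which sfq_change() enforces with
 * is_power_of_2().
 */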
static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		if (iph->frag_off & htons(IP_MF | IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		const struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
	default:
err:
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_SLOTS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int qlen = q->slots[x].qlen;

	p = qlen + SFQ_SLOTS;
	n = q->dep[qlen].next;

	q->slots[x].dep.next = n;
	q->slots[x].dep.prev = p;

	q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

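/*
 * Slots holding the same number of packets are chained on a doubly
 * linked list anchored at dep[qlen]. sfq_inc()/sfq_dec() below move a
 * slot to the chain matching its new length, so a longest active flow
 * is always reachable in O(1) as q->dep[q->cur_depth].next.
 */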
#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

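/*
 * The helpers below treat the slot itself as the list head of its skb
 * queue: the cast to (struct sk_buff *) works because skblist_next and
 * skblist_prev are the first two fields of struct sfq_slot, matching
 * the next/prev fields at the start of struct sk_buff.
 */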
/* helper functions : might be changed when/if skb uses a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define slot_queue_walk(slot, skb)			\
	for (skb = slot->skblist_next;			\
	     skb != (struct sk_buff *)slot;		\
	     skb = skb->next)

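/*
 * Drop policy, implemented by sfq_drop() below: when the qdisc is full,
 * drop one packet from the tail of a longest flow, found in O(1) via
 * the dep[] chains. If every active slot holds exactly one packet,
 * drop the single packet of the next slot in the round-robin ring and
 * unlink that slot.
 */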
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

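/*
 * sfq_enqueue() below maps the packet to a slot via sfq_classify(),
 * which returns bucket index + 1, or 0 to signal a drop. A flow whose
 * slot already holds q->limit packets suffers a simple tail drop of
 * the arriving packet; a brand new flow is spliced into the circular
 * list after the current tail, becomes the new tail and starts with a
 * full scaled_quantum of credit.
 */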
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	sfq_drop(sch);
	return NET_XMIT_CN;
}

static struct sk_buff *
sfq_peek(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

	return q->slots[q->tail->next].skblist_next;
}

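/*
 * sfq_dequeue() below is the deficit round robin core: each dequeued
 * packet costs SFQ_ALLOT_SIZE(len) credits; once a slot's allot drops
 * to zero or below, the slot is rotated to the back of the ring and
 * recharged with q->scaled_quantum before the next slot is served.
 */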
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

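/*
 * Each timer run above draws a fresh perturbation key, which re-spreads
 * flows that happened to share a hash bucket; a perturb_period of zero
 * leaves the hash key fixed.
 */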
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
	if (ctl->divisor)
		q->divisor = ctl->divisor;
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}

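/*
 * Typical userspace configuration (assuming iproute2's sfq parameters),
 * shown only as an illustration of the knobs handled by sfq_change()
 * above:
 *
 *	tc qdisc add dev eth0 root sfq perturb 10 quantum 1514 limit 127
 */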
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	size_t sz;
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_DEPTH; i++) {
		q->dep[i].next = i + SFQ_SLOTS;
		q->dep[i].prev = i + SFQ_SLOTS;
	}

	q->limit = SFQ_DEPTH - 1;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	sz = sizeof(q->ht[0]) * q->divisor;
	q->ht = kmalloc(sz, GFP_KERNEL);
	if (!q->ht && sz > PAGE_SIZE)
		q->ht = vmalloc(sz);
	if (!q->ht)
		return -ENOMEM;
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < SFQ_SLOTS; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

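/*
 * q->ht was allocated by sfq_init() above with kmalloc(), falling back
 * to vmalloc() for large divisors, so sfq_destroy() below checks
 * is_vmalloc_addr() to pick the matching free routine.
 */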
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	if (is_vmalloc_addr(q->ht))
		vfree(q->ht);
	else
		kfree(q->ht);
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = q->divisor;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

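/*
 * Each hash bucket is exposed to userspace as a pseudo-class whose
 * minor classid is the bucket index plus one; sfq_walk() below visits
 * every bucket but only invokes the callback for occupied ones.
 */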
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.get		=	sfq_get,
	.put		=	sfq_put,
	.tcf_chain	=	sfq_find_tcf,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_put,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	sfq_peek,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");