/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/red.h>

/* Stochastic Fairness Queuing algorithm.
   =======================================

   Source:
   Paul E. McKenney "Stochastic Fairness Queuing",
   IEEE INFOCOM'90 Proceedings, San Francisco, 1990.

   Paul E. McKenney "Stochastic Fairness Queuing",
   "Interworking: Research and Experience", v.2, 1991, p.113-131.


   See also:
   M. Shreedhar and George Varghese "Efficient Fair
   Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


   This is not the thing that is usually called (W)FQ nowadays.
   It does not use any timestamp mechanism, but instead
   processes queues in round-robin order.

   ADVANTAGE:

   - It is very cheap. Both CPU and memory requirements are minimal.

   DRAWBACKS:

   - "Stochastic" -> It is not 100% fair.
     When hash collisions occur, several flows are considered as one.

   - "Round-robin" -> It introduces larger delays than virtual clock
     based schemes, and should not be used for isolating interactive
     traffic from non-interactive. It means that this scheduler
     should be used as a leaf of CBQ or P3, which put interactive
     traffic into a higher priority band.

   We still need true WFQ for the top level CSZ, but using WFQ
   for the best effort traffic is absolutely pointless:
   SFQ is superior for this purpose.

   IMPLEMENTATION:
   This implementation limits:
   - maximal queue length per flow to 127 packets,
   - max mtu to 2^18-1,
   - max 65408 flows,
   - number of hash buckets to 65536.

   It is easy to increase these values, but not in flight. */

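/* Illustrative only (not part of the original file): from user space this
 * qdisc is typically attached with iproute2, e.g.
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * which installs SFQ as the root qdisc and rehashes flows every 10 seconds.
 */
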
#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
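
/* Worked example: with a quantum of 1514 bytes (one Ethernet MTU),
 * SFQ_ALLOT_SIZE(1514) = DIV_ROUND_UP(1514, 8) = 190, and even a 64K
 * packet scales to 8192, comfortably within a 16 bit allot.
 */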

/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;

/*
 * We don't use pointers to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array
 * while following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 * are 'pointers' to dep[] array
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq RR chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */

	unsigned int	backlog;
	struct red_vars vars;
};

struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	u8		headdrop;
	u8		maxdepth;	/* limit of packets per flow */

	u32		perturbation;
	u8		cur_depth;	/* depth of longest slot */
	u8		flags;
	unsigned short	scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct red_parms *red_parms;
	struct tc_sfqred_stats stats;
	struct sfq_slot *tail;		/* current slot in round */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
	/* Linked lists of slots, indexed by depth
	 * dep[0] : list of unused flows
	 * dep[1] : list of flows with 1 packet
	 * dep[X] : list of flows with X packets
	 */

	unsigned int	maxflows;	/* number of flows in flows array */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
	struct Qdisc	*sch;
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}
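
/* Index mapping example (illustration): with SFQ_MAX_FLOWS = 65408,
 * val = 3 resolves to &q->slots[3].dep, while val = 65408 + 2 resolves
 * to &q->dep[2], the head of the list of flows holding exactly 2 packets.
 */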

static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
}
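
/* sfq_change() only accepts power-of-two divisors, so the mask above is a
 * cheap modulo: e.g. divisor = 1024 masks the perturbed flow hash with
 * 0x3ff, yielding a bucket index in [0, 1023].
 */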

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)

static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}

/* helper functions: might be changed when/if skb uses a standard list_head */

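/* The (struct sk_buff *) casts below rely on skblist_next/skblist_prev
 * being the first two members of struct sfq_slot, mirroring the next/prev
 * pointers at the start of struct sk_buff: the slot itself serves as the
 * head of its own circular skb list.
 */
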
/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		slot->backlog -= len;
		sfq_dec(q, x);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, to_free);
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

/* Is the ECN parameter configured? */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over the max threshold just be marked? */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}

static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}
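
/* For reference, how the two RED/ECN flags combine (derived from the
 * helpers above):
 *
 *	TC_RED_ECN  TC_RED_HARDDROP	prob threshold	max threshold
 *	     0		 any		drop		drop
 *	     1		  0		mark		mark
 *	     1		  1		mark		drop
 */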

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							     &slot->vars,
							     slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		qdisc_tree_reduce_backlog(sch, 0, delta);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could end up servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	dropped = sfq_drop(sch, to_free);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen) {
		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
		return NET_XMIT_CN;
	}

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	slot->backlog -= qdisc_pkt_len(skb);
	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
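
/* Deficit round robin arithmetic (illustration): with the default quantum
 * of one MTU (say 1514 bytes), the scaled quantum is
 * SFQ_ALLOT_SIZE(1514) = 190 credits. A 1514 byte packet costs those same
 * 190 credits, so a flow of full-size packets spends its whole allot on
 * one packet per turn, while a flow of 100 byte packets (13 credits each)
 * may send about 14 before its credit runs out.
 */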

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}

/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;
	unsigned int drop_len = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				qdisc_qstats_backlog_dec(sch, skb);
				drop_len += qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}

static void sfq_perturbation(struct timer_list *t)
{
	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = prandom_u32();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen, dropped = 0;
	struct red_parms *p = NULL;
	struct sk_buff *to_free = NULL;
	struct sk_buff *tail = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit) {
		dropped += sfq_drop(sch, &to_free);
		if (!tail)
			tail = to_free;
	}

	rtnl_kfree_skbs(to_free, tail);
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = prandom_u32();
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}

static void *sfq_alloc(size_t sz)
{
	return kvmalloc(sz, GFP_KERNEL);
}

static void sfq_free(void *addr)
{
	kvfree(addr);
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
	kfree(q->red_parms);
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	q->sch = sch;
	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);

	err = tcf_block_get(&q->block, &q->filter_list, sch);
	if (err)
		return err;

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = prandom_u32();

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		/* Note: sfq_destroy() will be called by our caller */
		return -ENOMEM;
	}

	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	if (p) {
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags	= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		qs.backlog = slot->backlog;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.find		=	sfq_find,
	.tcf_block	=	sfq_tcf_block,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_unbind,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");