// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *  Meant to be mostly used for locally generated traffic :
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not, (router workload), we use rxhash as fallback, with 32 bits wide hash.
 *  All packets belonging to a socket are considered as a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also part of one of the Round Robin 'queues' (new or old flows).
 *
 *  Burst avoidance (aka pacing) capability :
 *
 *  Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect rate limitation.
 *
 *  enqueue() :
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add skb to the per flow list of skbs (fifo).
 *   - Use a special fifo for high prio packets
 *
 *  dequeue() : serves flows in Round Robin
 *  Note : When a flow becomes empty, we do not immediately remove it from
 *  rb trees, for performance reasons (it's expected to send additional packets,
 *  or the SLAB cache will reuse the socket for another flow)
 */
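
/*
 * Example configuration (illustrative sketch only; device name and values
 * are assumptions, option names as implemented by iproute2's "tc ... fq"):
 *
 *	tc qdisc replace dev eth0 root fq
 *	tc qdisc replace dev eth0 root fq limit 20000 maxrate 1gbit
 *
 * These parameters reach this module as TCA_FQ_* netlink attributes and
 * are parsed in fq_change() below.
 */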

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

struct fq_skb_cb {
        u64 time_to_send;
};

static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
        return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
}
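
/*
 * fq_skb_cb(skb)->time_to_send is the Earliest Departure Time of the packet:
 * flow_queue_add() fills it from skb->tstamp when the transport provided one,
 * or from the enqueue time otherwise, and fq_peek()/fq_dequeue() honor it.
 */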

/*
 * Per flow structure, dynamically allocated.
 * If packets have monotonically increasing time_to_send, they are placed in O(1)
 * in a linear list (head,tail), otherwise they are placed in an rbtree (t_root).
 */
struct fq_flow {
        struct rb_root  t_root;
        struct sk_buff  *head;          /* list of skbs for this flow : first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or &detached */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};

struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;
        unsigned long   unthrottle_latency_ns;

        struct fq_flow  internal;       /* for non classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_plimit;    /* max packets per flow */
        unsigned long   flow_max_rate;  /* optional max rate per flow */
        u64             ce_threshold;
        u32             orphan_mask;    /* mask for orphaned skb */
        u32             low_rate_threshold;
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_throttled;
        u64             stat_ce_mark;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};
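
/*
 * Flow life cycle, as implemented below: a flow is "detached" when it sits on
 * neither round robin list (f->next == &detached), "throttled" while it waits
 * in the q->delayed rbtree (f->next == &throttled), and otherwise it is linked
 * on q->new_flows or q->old_flows.
 */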

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
        return f->next == &throttled;
}

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        rb_erase(&f->rate_node, &q->delayed);
        q->throttled_flows--;
        fq_flow_add_tail(&q->old_flows, f);
}

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = rb_entry(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}
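
/*
 * Garbage collection is opportunistic: fq_classify() calls fq_gc() on the
 * bucket it is about to search once the table holds at least 2^(log+1) flows
 * and more than half of them are inactive, freeing at most FQ_GC_MAX flows
 * that have been detached for longer than FQ_GC_AGE.
 */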

static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
         * or a listener (SYNCOOKIE mode)
         * 1) request sockets are not full blown,
         *    they do not contain sk_pacing_rate
         * 2) They are not part of a 'flow' yet
         * 3) We do not want to rate limit them (eg SYNFLOOD attack),
         *    especially if the listener set SO_MAX_PACING_RATE
         * 4) We pretend they are orphaned
         */
        if (!sk || sk_listener(sk)) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
                skb_orphan(skb);
        } else if (sk->sk_state == TCP_CLOSE) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
                /*
                 * Sockets in TCP_CLOSE are non connected.
                 * Typical use case is UDP sockets, they can send packets
                 * with sendto() to many different destinations.
                 * We probably could use a generic bit advertising
                 * non connected sockets, instead of sk_state == TCP_CLOSE,
                 * if we care enough.
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
        }

        root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with
                         * initial quantum
                         */
                        if (unlikely(skb->sk == sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        /* f->t_root is already zeroed after kmem_cache_zalloc() */

        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk == sk)
                f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}

static struct sk_buff *fq_peek(struct fq_flow *flow)
{
        struct sk_buff *skb = skb_rb_first(&flow->t_root);
        struct sk_buff *head = flow->head;

        if (!skb)
                return head;

        if (!head)
                return skb;

        if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)
                return skb;
        return head;
}

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
                          struct sk_buff *skb)
{
        if (skb == flow->head) {
                flow->head = skb->next;
        } else {
                rb_erase(&skb->rbnode, &flow->t_root);
                skb->dev = qdisc_dev(sch);
        }
}

/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = fq_peek(flow);

        if (skb) {
                fq_erase_head(sch, flow, skb);
                skb_mark_not_on_list(skb);
                flow->qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
}

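/*
 * Queue an skb on a flow, ordered by fq_skb_cb(skb)->time_to_send:
 * packets whose departure time is not earlier than the current tail stay on
 * the O(1) linear list (head/tail); only reordered packets go into the
 * t_root rbtree.
 */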
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct rb_node **p, *parent;
        struct sk_buff *head, *aux;

        fq_skb_cb(skb)->time_to_send = skb->tstamp ?: ktime_get_ns();

        head = flow->head;
        if (!head ||
            fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
                if (!head)
                        flow->head = skb;
                else
                        flow->tail->next = skb;
                flow->tail = skb;
                skb->next = NULL;
                return;
        }

        p = &flow->t_root.rb_node;
        parent = NULL;

        while (*p) {
                parent = *p;
                aux = rb_to_skb(parent);
                if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&skb->rbnode, parent, p);
        rb_insert_color(&skb->rbnode, &flow->t_root);
}

static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch, to_free);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch, to_free);
        }

        f->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                struct sock *sk = skb->sk;

                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                if (sk && q->rate_enable) {
                        if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
                                     SK_PACING_FQ))
                                smp_store_release(&sk->sk_pacing_status,
                                                  SK_PACING_FQ);
                }
                q->inactive_flows--;
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;
        }
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        unsigned long sample;
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        /* Update unthrottle latency EWMA.
         * This is cheap and can help diagnosing timer/latency problems.
         */
        sample = (unsigned long)(now - q->time_next_delayed_flow);
        q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
        q->unthrottle_latency_ns += sample >> 3;
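        /* i.e. unthrottle_latency_ns = 7/8 * unthrottle_latency_ns + 1/8 * sample,
         * modulo the truncation of the two right shifts.
         */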

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                fq_flow_unset_throttled(q, f);
        }
}

static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        unsigned long rate;
        u32 plen;
        u64 now;

        if (!sch->q.qlen)
                return NULL;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;

        now = ktime_get_ns();
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);
                        return NULL;
                }
        }
        f = head->first;

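        /* Deficit Round Robin style credit: a flow that has exhausted its
         * credit gets another quantum and moves to the tail of old_flows.
         */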
        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        skb = fq_peek(f);
        if (skb) {
                u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
                                             f->time_next_packet);

                if (now < time_next_packet) {
                        head->first = f->next;
                        f->time_next_packet = time_next_packet;
                        fq_flow_set_throttled(q, f);
                        goto begin;
                }
                if (time_next_packet &&
                    (s64)(now - time_next_packet - q->ce_threshold) > 0) {
                        INET_ECN_set_ce(skb);
                        q->stat_ce_mark++;
                }
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        plen = qdisc_pkt_len(skb);
        f->credit -= plen;

        if (!q->rate_enable)
                goto out;

        rate = q->flow_max_rate;

        /* If EDT time was provided for this skb, we need to
         * update f->time_next_packet only if this qdisc enforces
         * a flow max rate.
         */
        if (!skb->tstamp) {
                if (skb->sk)
                        rate = min(skb->sk->sk_pacing_rate, rate);

                if (rate <= q->low_rate_threshold) {
                        f->credit = 0;
                } else {
                        plen = max(plen, q->quantum);
                        if (f->credit > 0)
                                goto out;
                }
        }
        if (rate != ~0UL) {
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        len = div64_ul(len, rate);
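                /* len is now the pacing delay in nanoseconds:
                 * plen (bytes) * NSEC_PER_SEC / rate (bytes per second).
                 */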
                /* Since socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too big packets should be fixed !
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }
                /* Account for schedule/timers drifts.
                 * f->time_next_packet was set when prior packet was sent,
                 * and current time (@now) can be too late by tens of us.
                 */
                if (f->time_next_packet)
                        len -= min(len/2, now - f->time_next_packet);
                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
        struct rb_node *p = rb_first(&flow->t_root);

        while (p) {
                struct sk_buff *skb = rb_to_skb(p);

                p = rb_next(p);
                rb_erase(&skb->rbnode, &flow->t_root);
                rtnl_kfree_skbs(skb, skb);
        }
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
        flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        sch->q.qlen = 0;
        sch->qstats.backlog = 0;

        fq_flow_purge(&q->internal);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = rb_entry(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        fq_flow_purge(f);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first = NULL;
        q->old_flows.first = NULL;
        q->delayed = RB_ROOT;
        q->flows = 0;
        q->inactive_flows = 0;
        q->throttled_flows = 0;
}

static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = rb_entry(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_ptr(of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = rb_entry(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
        kvfree(addr);
}

static int fq_resize(struct Qdisc *sch, u32 log)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;
        void *old_fq_root;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        /* If XPS was setup, we can allocate memory on right NUMA node */
        array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                              netdev_queue_numa_node_read(sch->dev_queue));
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        sch_tree_lock(sch);

        old_fq_root = q->fq_root;
        if (old_fq_root)
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_root = array;
        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);

        return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT] = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM] = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM] = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
        [TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
};

static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        unsigned drop_len = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
                                          NULL);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

                if (quantum > 0)
                        q->quantum = quantum;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE]) {
                u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

                q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
        }
        if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
                q->low_rate_threshold =
                        nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (tb[TCA_FQ_ORPHAN_MASK])
                q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

        if (tb[TCA_FQ_CE_THRESHOLD])
                q->ce_threshold = (u64)NSEC_PER_USEC *
                                  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        fq_free(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

static int fq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit = 10000;
        q->flow_plimit = 100;
        q->quantum = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay = msecs_to_jiffies(40);
        q->flow_max_rate = ~0UL;
        q->time_next_delayed_flow = ~0ULL;
        q->rate_enable = 1;
        q->new_flows.first = NULL;
        q->old_flows.first = NULL;
        q->delayed = RB_ROOT;
        q->fq_root = NULL;
        q->fq_trees_log = ilog2(1024);
        q->orphan_mask = 1024 - 1;
        q->low_rate_threshold = 550000 / 8;

        /* Default ce_threshold of 4294 seconds */
        q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
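        /* ~0U microseconds is 2^32 - 1 us, i.e. the ~4294 seconds above:
         * so large that CE marking effectively never triggers until a
         * smaller threshold is configured.
         */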

        qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

        if (opt)
                err = fq_change(sch, opt, extack);
        else
                err = fq_resize(sch, q->fq_trees_log);

        return err;
}

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 ce_threshold = q->ce_threshold;
        struct nlattr *opts;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        do_div(ce_threshold, NSEC_PER_USEC);

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
                        min_t(unsigned long, q->flow_max_rate, ~0U)) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
                        q->low_rate_threshold) ||
            nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st;

        sch_tree_lock(sch);

        st.gc_flows = q->stat_gc_flows;
        st.highprio_packets = q->stat_internal_packets;
        st.tcp_retrans = 0;
        st.throttled = q->stat_throttled;
        st.flows_plimit = q->stat_flows_plimit;
        st.pkts_too_long = q->stat_pkts_too_long;
        st.allocation_errors = q->stat_allocation_errors;
        st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.flows = q->flows;
        st.inactive_flows = q->inactive_flows;
        st.throttled_flows = q->throttled_flows;
        st.unthrottle_latency_ns = min_t(unsigned long,
                                         q->unthrottle_latency_ns, ~0U);
        st.ce_mark = q->stat_ce_mark;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id = "fq",
        .priv_size = sizeof(struct fq_sched_data),

        .enqueue = fq_enqueue,
        .dequeue = fq_dequeue,
        .peek = qdisc_peek_dequeued,
        .init = fq_init,
        .reset = fq_reset,
        .destroy = fq_destroy,
        .change = fq_change,
        .dump = fq_dump,
        .dump_stats = fq_dump_stats,
        .owner = THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");