2874c5fd 1// SPDX-License-Identifier: GPL-2.0-or-later
87990467 2/*
1da177e4
LT
3 * net/sched/sch_htb.c Hierarchical token bucket, feed tree version
4 *
1da177e4
LT
5 * Authors: Martin Devera, <devik@cdi.cz>
6 *
7 * Credits (in time order) for older HTB versions:
8 * Stef Coene <stef.coene@docum.org>
9 * HTB support at LARTC mailing list
10297b99 10 * Ondrej Kraus, <krauso@barr.cz>
1da177e4
LT
11 * found missing INIT_QDISC(htb)
12 * Vladimir Smelhaus, Aamer Akhter, Bert Hubert
13 * helped a lot to locate nasty class stall bug
14 * Andi Kleen, Jamal Hadi, Bert Hubert
15 * code review and helpful comments on shaping
16 * Tomasz Wrona, <tw@eter.tym.pl>
17 * created test case so that I was able to fix nasty bug
18 * Wilfried Weissmann
19 * spotted bug in dequeue code and helped with fix
20 * Jiri Fojtasek
21 * fixed requeue routine
22 * and many others. thanks.
1da177e4 23 */
1da177e4 24#include <linux/module.h>
47083fc0 25#include <linux/moduleparam.h>
1da177e4
LT
26#include <linux/types.h>
27#include <linux/kernel.h>
1da177e4 28#include <linux/string.h>
1da177e4 29#include <linux/errno.h>
1da177e4
LT
30#include <linux/skbuff.h>
31#include <linux/list.h>
32#include <linux/compiler.h>
0ba48053 33#include <linux/rbtree.h>
1224736d 34#include <linux/workqueue.h>
5a0e3ad6 35#include <linux/slab.h>
dc5fc579 36#include <net/netlink.h>
292f1c7f 37#include <net/sch_generic.h>
1da177e4 38#include <net/pkt_sched.h>
cf1facda 39#include <net/pkt_cls.h>
1da177e4
LT
40
41/* HTB algorithm.
42 Author: devik@cdi.cz
43 ========================================================================
44 HTB is like TBF with multiple classes. It is also similar to CBQ because
10297b99 45 it allows assigning a priority to each class in the hierarchy.
1da177e4
LT
46 In fact it is another implementation of Floyd's formal sharing.
47
48 Levels:
10297b99 49 Each class is assigned a level. Leaves ALWAYS have level 0 and the root
1da177e4
LT
 50 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
51 one less than their parent.
52*/
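/* Illustrative example (added; not part of the original source): a minimal
 * HTB hierarchy as it might be configured from user space with tc(8).
 * Device name and rates are arbitrary placeholders:
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit ceil 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 60mbit  ceil 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 40mbit  ceil 100mbit
 *
 * Here 1:1 is an interior class and 1:10/1:20 are leaves (level 0); the
 * leaves may borrow up to their ceil from 1:1 when it has spare tokens.
 */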
53
47083fc0 54static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
37f2ad2b 55#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */
1da177e4
LT
56
57#if HTB_VER >> 16 != TC_HTB_PROTOVER
58#error "Mismatched sch_htb.c and pkt_sch.h"
59#endif
60
47083fc0
JDB
61/* Module parameter and sysfs export */
62module_param (htb_hysteresis, int, 0640);
63MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
64
64153ce0
ED
65static int htb_rate_est = 0; /* whether htb classes get a default rate estimator */
66module_param(htb_rate_est, int, 0640);
67MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
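/* Illustrative usage (added note): both knobs can be given at module load
 * time or, since the parameters are created with 0640 permissions, changed
 * at runtime through sysfs, e.g.:
 *
 *   modprobe sch_htb htb_hysteresis=1 htb_rate_est=1
 *   echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 */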
68
1da177e4
LT
69/* used internally to keep the status of a single class */
70enum htb_cmode {
87990467
SH
71 HTB_CANT_SEND, /* class can't send and can't borrow */
72 HTB_MAY_BORROW, /* class can't send but may borrow */
73 HTB_CAN_SEND /* class can send */
1da177e4
LT
74};
75
c9364636
ED
76struct htb_prio {
77 union {
78 struct rb_root row;
79 struct rb_root feed;
80 };
81 struct rb_node *ptr;
82 /* When class changes from state 1->2 and disconnects from
83 * parent's feed then we lost ptr value and start from the
84 * first child again. Here we store classid of the
85 * last valid ptr (used when ptr is NULL).
86 */
87 u32 last_ptr_id;
88};
89
ca4ec90b
ED
90/* interior & leaf nodes; props specific to leaves are marked L:
91 * To reduce false sharing, place mostly read fields at beginning,
92 * and mostly written ones at the end.
93 */
87990467 94struct htb_class {
f4c1f3e0 95 struct Qdisc_class_common common;
ca4ec90b
ED
96 struct psched_ratecfg rate;
97 struct psched_ratecfg ceil;
98 s64 buffer, cbuffer;/* token bucket depth/rate */
99 s64 mbuffer; /* max wait time */
cbd37556 100 u32 prio; /* these two are used only by leaves... */
ca4ec90b
ED
101 int quantum; /* but stored for parent-to-leaf return */
102
25d8c0d5 103 struct tcf_proto __rcu *filter_list; /* class attached filters */
6529eaba 104 struct tcf_block *block;
ca4ec90b 105 int filter_cnt;
ca4ec90b
ED
106
107 int level; /* our level (see above) */
108 unsigned int children;
109 struct htb_class *parent; /* parent class */
110
1c0d32fd 111 struct net_rate_estimator __rcu *rate_est;
1da177e4 112
ca4ec90b
ED
113 /*
114 * Written often fields
115 */
50dc9a85
AD
116 struct gnet_stats_basic_sync bstats;
117 struct gnet_stats_basic_sync bstats_bias;
ca4ec90b 118 struct tc_htb_xstats xstats; /* our special stats */
87990467 119
ca4ec90b
ED
120 /* token bucket parameters */
121 s64 tokens, ctokens;/* current number of tokens */
122 s64 t_c; /* checkpoint time */
c19f7a34 123
87990467
SH
124 union {
125 struct htb_class_leaf {
c9364636
ED
126 int deficit[TC_HTB_MAXDEPTH];
127 struct Qdisc *q;
ca49bfd9 128 struct netdev_queue *offload_queue;
87990467
SH
129 } leaf;
130 struct htb_class_inner {
c9364636 131 struct htb_prio clprio[TC_HTB_NUMPRIO];
87990467 132 } inner;
11957be2 133 };
ca4ec90b 134 s64 pq_key;
87990467 135
ca4ec90b
ED
136 int prio_activity; /* for which prios are we active */
137 enum htb_cmode cmode; /* current mode of the class */
138 struct rb_node pq_node; /* node for event queue */
139 struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
338ed9b4
ED
140
141 unsigned int drops ____cacheline_aligned_in_smp;
3c75f6ee 142 unsigned int overlimits;
1da177e4
LT
143};
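/* Added reader's note (illustration only, not in the original source): which
 * member of the union above is valid is decided by cl->level, e.g.:
 *
 *   if (cl->level == 0)
 *           qlen = cl->leaf.q->q.qlen;                 // leaf: own qdisc
 *   else
 *           rb = cl->inner.clprio[prio].feed.rb_node;  // interior: feed trees
 */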
144
c9364636
ED
145struct htb_level {
146 struct rb_root wait_pq;
147 struct htb_prio hprio[TC_HTB_NUMPRIO];
148};
149
87990467 150struct htb_sched {
f4c1f3e0 151 struct Qdisc_class_hash clhash;
c9364636
ED
152 int defcls; /* class where unclassified flows go to */
153 int rate2quantum; /* quant = rate / rate2quantum */
1da177e4 154
c9364636 155 /* filters for qdisc itself */
25d8c0d5 156 struct tcf_proto __rcu *filter_list;
6529eaba 157 struct tcf_block *block;
1da177e4 158
c9364636
ED
159#define HTB_WARN_TOOMANYEVENTS 0x1
160 unsigned int warned; /* only one warning */
161 int direct_qlen;
162 struct work_struct work;
1da177e4 163
c9364636 164 /* non shaped skbs; let them go directly thru */
48da34b7 165 struct qdisc_skb_head direct_queue;
b362487a
CW
166 u32 direct_pkts;
167 u32 overlimits;
1da177e4 168
c9364636 169 struct qdisc_watchdog watchdog;
1da177e4 170
c9364636 171 s64 now; /* cached dequeue time */
1da177e4 172
c9364636
ED
173 /* time of nearest event per level (row) */
174 s64 near_ev_cache[TC_HTB_MAXDEPTH];
87990467 175
c9364636 176 int row_mask[TC_HTB_MAXDEPTH];
e82181de 177
c9364636 178 struct htb_level hlevel[TC_HTB_MAXDEPTH];
d03b195b
MM
179
180 struct Qdisc **direct_qdiscs;
181 unsigned int num_direct_qdiscs;
182
183 bool offload;
1da177e4
LT
184};
185
1da177e4 186/* find class in global hash table using given handle */
87990467 187static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
1da177e4
LT
188{
189 struct htb_sched *q = qdisc_priv(sch);
f4c1f3e0 190 struct Qdisc_class_common *clc;
0cef296d 191
f4c1f3e0
PM
192 clc = qdisc_class_find(&q->clhash, handle);
193 if (clc == NULL)
1da177e4 194 return NULL;
f4c1f3e0 195 return container_of(clc, struct htb_class, common);
1da177e4
LT
196}
197
143976ce
WC
198static unsigned long htb_search(struct Qdisc *sch, u32 handle)
199{
200 return (unsigned long)htb_find(handle, sch);
201}
43d25378
RD
202
203#define HTB_DIRECT ((struct htb_class *)-1L)
204
1da177e4
LT
205/**
206 * htb_classify - classify a packet into class
43d25378
RD
207 * @skb: the socket buffer
208 * @sch: the active queue discipline
209 * @qerr: pointer for returned status code
1da177e4
LT
210 *
211 * It returns NULL if the packet should be dropped or -1 if the packet
212 * should be passed directly thru. In all other cases leaf class is returned.
 213 * We allow direct class selection by classid in priority. Then we examine
214 * filters in qdisc and in inner nodes (if higher filter points to the inner
215 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
10297b99 216 * internal fifo (direct). These packets then go directly thru. If we still
25985edc 217 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful,
1da177e4
LT
218 * then finish and return direct queue.
219 */
87990467
SH
220static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
221 int *qerr)
1da177e4
LT
222{
223 struct htb_sched *q = qdisc_priv(sch);
224 struct htb_class *cl;
225 struct tcf_result res;
226 struct tcf_proto *tcf;
227 int result;
228
229 /* allow to select class by setting skb->priority to valid classid;
cc7ec456
ED
230 * note that nfmark can be used too by attaching filter fw with no
231 * rules in it
232 */
1da177e4 233 if (skb->priority == sch->handle)
87990467 234 return HTB_DIRECT; /* X:0 (direct flow) selected */
cc7ec456 235 cl = htb_find(skb->priority, sch);
29824310
HM
236 if (cl) {
237 if (cl->level == 0)
238 return cl;
239 /* Start with inner filter chain if a non-leaf class is selected */
25d8c0d5 240 tcf = rcu_dereference_bh(cl->filter_list);
29824310 241 } else {
25d8c0d5 242 tcf = rcu_dereference_bh(q->filter_list);
29824310 243 }
1da177e4 244
c27f339a 245 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
3aa26055 246 while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
1da177e4
LT
247#ifdef CONFIG_NET_CLS_ACT
248 switch (result) {
249 case TC_ACT_QUEUED:
87990467 250 case TC_ACT_STOLEN:
e25ea21f 251 case TC_ACT_TRAP:
378a2f09 252 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
964201de 253 fallthrough;
1da177e4
LT
254 case TC_ACT_SHOT:
255 return NULL;
256 }
1da177e4 257#endif
cc7ec456
ED
258 cl = (void *)res.class;
259 if (!cl) {
1da177e4 260 if (res.classid == sch->handle)
87990467 261 return HTB_DIRECT; /* X:0 (direct flow) */
cc7ec456
ED
262 cl = htb_find(res.classid, sch);
263 if (!cl)
87990467 264 break; /* filter selected invalid classid */
1da177e4
LT
265 }
266 if (!cl->level)
87990467 267 return cl; /* we hit leaf; return it */
1da177e4
LT
268
269 /* we have got inner class; apply inner filter chain */
25d8c0d5 270 tcf = rcu_dereference_bh(cl->filter_list);
1da177e4
LT
271 }
272 /* classification failed; try to use default class */
87990467 273 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
1da177e4 274 if (!cl || cl->level)
87990467 275 return HTB_DIRECT; /* bad default .. this is safe bet */
1da177e4
LT
276 return cl;
277}
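/* Worked example (added for illustration; assumes qdisc handle 1: with
 * default class 1:20): skb->priority == 1:0 yields HTB_DIRECT; a priority
 * naming an existing leaf such as 1:10 returns that leaf without consulting
 * any filter; otherwise the filter chains run, and when they produce nothing
 * usable the default leaf 1:20 is used, falling back to HTB_DIRECT if that
 * class is missing or is an interior class.
 */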
278
1da177e4
LT
279/**
280 * htb_add_to_id_tree - adds class to the round robin list
a10541f5
YK
281 * @root: the root of the tree
282 * @cl: the class to add
 283 * @prio: the given prio in class
1da177e4
LT
284 *
285 * Routine adds class to the list (actually tree) sorted by classid.
286 * Make sure that class is not already on such list for given prio.
287 */
87990467
SH
288static void htb_add_to_id_tree(struct rb_root *root,
289 struct htb_class *cl, int prio)
1da177e4
LT
290{
291 struct rb_node **p = &root->rb_node, *parent = NULL;
3bf72957 292
1da177e4 293 while (*p) {
87990467
SH
294 struct htb_class *c;
295 parent = *p;
1da177e4 296 c = rb_entry(parent, struct htb_class, node[prio]);
3bf72957 297
f4c1f3e0 298 if (cl->common.classid > c->common.classid)
1da177e4 299 p = &parent->rb_right;
87990467 300 else
1da177e4
LT
301 p = &parent->rb_left;
302 }
303 rb_link_node(&cl->node[prio], parent, p);
304 rb_insert_color(&cl->node[prio], root);
305}
306
307/**
308 * htb_add_to_wait_tree - adds class to the event queue with delay
4d7efa73
YK
309 * @q: the priority event queue
310 * @cl: the class to add
 311 * @delay: delay in nanoseconds
1da177e4
LT
312 *
313 * The class is added to priority event queue to indicate that class will
 314 * change its mode at time cl->pq_key (in nanoseconds). Make sure that class is not
315 * already in the queue.
316 */
87990467 317static void htb_add_to_wait_tree(struct htb_sched *q,
56b765b7 318 struct htb_class *cl, s64 delay)
1da177e4 319{
c9364636 320 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
3bf72957 321
fb983d45
PM
322 cl->pq_key = q->now + delay;
323 if (cl->pq_key == q->now)
1da177e4
LT
324 cl->pq_key++;
325
326 /* update the nearest event cache */
fb983d45 327 if (q->near_ev_cache[cl->level] > cl->pq_key)
1da177e4 328 q->near_ev_cache[cl->level] = cl->pq_key;
87990467 329
1da177e4 330 while (*p) {
87990467
SH
331 struct htb_class *c;
332 parent = *p;
1da177e4 333 c = rb_entry(parent, struct htb_class, pq_node);
fb983d45 334 if (cl->pq_key >= c->pq_key)
1da177e4 335 p = &parent->rb_right;
87990467 336 else
1da177e4
LT
337 p = &parent->rb_left;
338 }
339 rb_link_node(&cl->pq_node, parent, p);
c9364636 340 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
1da177e4
LT
341}
342
343/**
344 * htb_next_rb_node - finds next node in binary tree
274e5d0e 345 * @n: the current node in binary tree
1da177e4
LT
346 *
347 * When we are past last key we return NULL.
348 * Average complexity is 2 steps per call.
349 */
3696f625 350static inline void htb_next_rb_node(struct rb_node **n)
1da177e4
LT
351{
352 *n = rb_next(*n);
353}
354
355/**
356 * htb_add_class_to_row - add class to its row
996bccc3
YK
357 * @q: the priority event queue
358 * @cl: the class to add
359 * @mask: the given priorities in class in bitmap
1da177e4
LT
360 *
361 * The class is added to row at priorities marked in mask.
362 * It does nothing if mask == 0.
363 */
87990467
SH
364static inline void htb_add_class_to_row(struct htb_sched *q,
365 struct htb_class *cl, int mask)
1da177e4 366{
1da177e4
LT
367 q->row_mask[cl->level] |= mask;
368 while (mask) {
369 int prio = ffz(~mask);
370 mask &= ~(1 << prio);
c9364636 371 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
1da177e4
LT
372 }
373}
374
3696f625
SH
375/* If this triggers, it is a bug in this code, but it need not be fatal */
376static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
377{
81771b3b 378 if (RB_EMPTY_NODE(rb)) {
3696f625
SH
379 WARN_ON(1);
380 } else {
381 rb_erase(rb, root);
382 RB_CLEAR_NODE(rb);
383 }
384}
385
386
1da177e4
LT
387/**
388 * htb_remove_class_from_row - removes class from its row
5f8c6d05
YK
389 * @q: the priority event queue
 390 * @cl: the class to remove
391 * @mask: the given priorities in class in bitmap
1da177e4
LT
392 *
393 * The class is removed from row at priorities marked in mask.
394 * It does nothing if mask == 0.
395 */
87990467
SH
396static inline void htb_remove_class_from_row(struct htb_sched *q,
397 struct htb_class *cl, int mask)
1da177e4
LT
398{
399 int m = 0;
c9364636 400 struct htb_level *hlevel = &q->hlevel[cl->level];
3bf72957 401
1da177e4
LT
402 while (mask) {
403 int prio = ffz(~mask);
c9364636 404 struct htb_prio *hprio = &hlevel->hprio[prio];
3696f625 405
1da177e4 406 mask &= ~(1 << prio);
c9364636
ED
407 if (hprio->ptr == cl->node + prio)
408 htb_next_rb_node(&hprio->ptr);
3696f625 409
c9364636
ED
410 htb_safe_rb_erase(cl->node + prio, &hprio->row);
411 if (!hprio->row.rb_node)
1da177e4
LT
412 m |= 1 << prio;
413 }
1da177e4
LT
414 q->row_mask[cl->level] &= ~m;
415}
416
417/**
 418 * htb_activate_prios - creates active class's feed chain
876b5fc0
YK
419 * @q: the priority event queue
420 * @cl: the class to activate
1da177e4
LT
421 *
422 * The class is connected to ancestors and/or appropriate rows
10297b99 423 * for priorities it participates in. cl->cmode must be the new
1da177e4
LT
424 * (activated) mode. It does nothing if cl->prio_activity == 0.
425 */
87990467 426static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
1da177e4
LT
427{
428 struct htb_class *p = cl->parent;
87990467 429 long m, mask = cl->prio_activity;
1da177e4
LT
430
431 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
87990467
SH
432 m = mask;
433 while (m) {
de5ca4c3
KC
434 unsigned int prio = ffz(~m);
435
9cec2aaf 436 if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
de5ca4c3 437 break;
1da177e4 438 m &= ~(1 << prio);
87990467 439
11957be2 440 if (p->inner.clprio[prio].feed.rb_node)
1da177e4 441 /* parent already has its feed in use so that
cc7ec456
ED
442 * reset bit in mask as parent is already ok
443 */
1da177e4 444 mask &= ~(1 << prio);
87990467 445
11957be2 446 htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
1da177e4 447 }
1da177e4 448 p->prio_activity |= mask;
87990467
SH
449 cl = p;
450 p = cl->parent;
3bf72957 451
1da177e4
LT
452 }
453 if (cl->cmode == HTB_CAN_SEND && mask)
87990467 454 htb_add_class_to_row(q, cl, mask);
1da177e4
LT
455}
456
457/**
458 * htb_deactivate_prios - remove class from feed chain
4113be20
YK
459 * @q: the priority event queue
460 * @cl: the class to deactivate
1da177e4 461 *
10297b99 462 * cl->cmode must represent old mode (before deactivation). It does
1da177e4
LT
463 * nothing if cl->prio_activity == 0. Class is removed from all feed
464 * chains and rows.
465 */
466static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
467{
468 struct htb_class *p = cl->parent;
87990467 469 long m, mask = cl->prio_activity;
1da177e4
LT
470
471 while (cl->cmode == HTB_MAY_BORROW && p && mask) {
87990467
SH
472 m = mask;
473 mask = 0;
1da177e4
LT
474 while (m) {
475 int prio = ffz(~m);
476 m &= ~(1 << prio);
87990467 477
11957be2 478 if (p->inner.clprio[prio].ptr == cl->node + prio) {
1da177e4 479 /* we are removing child which is pointed to from
cc7ec456
ED
480 * parent feed - forget the pointer but remember
481 * classid
482 */
11957be2
CW
483 p->inner.clprio[prio].last_ptr_id = cl->common.classid;
484 p->inner.clprio[prio].ptr = NULL;
1da177e4 485 }
87990467 486
c9364636 487 htb_safe_rb_erase(cl->node + prio,
11957be2 488 &p->inner.clprio[prio].feed);
87990467 489
11957be2 490 if (!p->inner.clprio[prio].feed.rb_node)
1da177e4
LT
491 mask |= 1 << prio;
492 }
3bf72957 493
1da177e4 494 p->prio_activity &= ~mask;
87990467
SH
495 cl = p;
496 p = cl->parent;
3bf72957 497
1da177e4 498 }
87990467
SH
499 if (cl->cmode == HTB_CAN_SEND && mask)
500 htb_remove_class_from_row(q, cl, mask);
1da177e4
LT
501}
502
56b765b7 503static inline s64 htb_lowater(const struct htb_class *cl)
18a63e86 504{
47083fc0
JDB
505 if (htb_hysteresis)
506 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
507 else
508 return 0;
18a63e86 509}
56b765b7 510static inline s64 htb_hiwater(const struct htb_class *cl)
18a63e86 511{
47083fc0
JDB
512 if (htb_hysteresis)
513 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
514 else
515 return 0;
18a63e86 516}
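/* Added note (illustrative numbers, not from the original source): with
 * htb_hysteresis enabled the thresholds depend on the current mode.  For
 * cl->buffer == cl->cbuffer == 10000, a class in HTB_CAN_SEND keeps that mode
 * until tokens sink below -10000 (htb_hiwater == -cl->buffer) and is only
 * marked HTB_CANT_SEND once ctokens sink below -10000 (htb_lowater ==
 * -cl->cbuffer), while a class already in HTB_CANT_SEND sees 0 for both.
 * With htb_hysteresis == 0 both helpers always return 0.
 */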
47083fc0 517
18a63e86 518
1da177e4
LT
519/**
520 * htb_class_mode - computes and returns current class mode
1e955952
YK
521 * @cl: the target class
 522 * @diff: diff time in nanoseconds
1da177e4
LT
523 *
524 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
525 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
10297b99 526 * from now to time when cl will change its state.
1da177e4 527 * It is also worth noting that class mode doesn't change simply
10297b99 528 * at cl->{c,}tokens == 0; rather, there can be hysteresis in the
1da177e4
LT
529 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
530 * mode transitions per time unit. The speed gain is about 1/6.
531 */
87990467 532static inline enum htb_cmode
56b765b7 533htb_class_mode(struct htb_class *cl, s64 *diff)
1da177e4 534{
56b765b7 535 s64 toks;
1da177e4 536
87990467
SH
537 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
538 *diff = -toks;
539 return HTB_CANT_SEND;
540 }
18a63e86 541
87990467
SH
542 if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
543 return HTB_CAN_SEND;
1da177e4 544
87990467
SH
545 *diff = -toks;
546 return HTB_MAY_BORROW;
1da177e4
LT
547}
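/* Added worked example (made-up numbers): suppose *diff == 3000,
 * cl->ctokens == -500, cl->tokens == -4000 and hysteresis is off (both
 * thresholds 0).  Then ctokens + diff == 2500 >= 0, so the class is not
 * HTB_CANT_SEND, but tokens + diff == -1000 < 0, so htb_class_mode() returns
 * HTB_MAY_BORROW and sets *diff = 1000, the shortfall later used to schedule
 * the class on the wait queue.
 */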
548
549/**
 550 * htb_change_class_mode - changes class's mode
4b479e98
YK
551 * @q: the priority event queue
552 * @cl: the target class
 553 * @diff: diff time in nanoseconds
1da177e4
LT
554 *
 555 * This should be the only way to change a class's mode under normal
37f2ad2b 556 * circumstances. The routine will update feed list linkage, change the mode
1da177e4
LT
557 * and add class to the wait event queue if appropriate. New mode should
558 * be different from old one and cl->pq_key has to be valid if changing
559 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
560 */
87990467 561static void
56b765b7 562htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
87990467
SH
563{
564 enum htb_cmode new_mode = htb_class_mode(cl, diff);
1da177e4
LT
565
566 if (new_mode == cl->cmode)
87990467
SH
567 return;
568
b362487a 569 if (new_mode == HTB_CANT_SEND) {
3c75f6ee 570 cl->overlimits++;
b362487a
CW
571 q->overlimits++;
572 }
3c75f6ee 573
87990467
SH
574 if (cl->prio_activity) { /* not necessary: speed optimization */
575 if (cl->cmode != HTB_CANT_SEND)
576 htb_deactivate_prios(q, cl);
1da177e4 577 cl->cmode = new_mode;
87990467
SH
578 if (new_mode != HTB_CANT_SEND)
579 htb_activate_prios(q, cl);
580 } else
1da177e4
LT
581 cl->cmode = new_mode;
582}
583
584/**
10297b99 585 * htb_activate - inserts leaf cl into appropriate active feeds
8df7e8ff
YK
586 * @q: the priority event queue
587 * @cl: the target class
1da177e4
LT
588 *
589 * Routine learns (new) priority of leaf and activates feed chain
590 * for the prio. It can be called on already active leaf safely.
591 * It also adds leaf into droplist.
592 */
87990467 593static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
1da177e4 594{
11957be2 595 WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
3bf72957 596
1da177e4 597 if (!cl->prio_activity) {
c19f7a34 598 cl->prio_activity = 1 << cl->prio;
87990467 599 htb_activate_prios(q, cl);
1da177e4
LT
600 }
601}
602
603/**
10297b99 604 * htb_deactivate - remove leaf cl from active feeds
9a034f25
YK
605 * @q: the priority event queue
606 * @cl: the target class
1da177e4
LT
607 *
 608 * Make sure that leaf is active. In other words, it can't be called
609 * with non-active leaf. It also removes class from the drop list.
610 */
87990467 611static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
1da177e4 612{
547b792c 613 WARN_ON(!cl->prio_activity);
3bf72957 614
87990467 615 htb_deactivate_prios(q, cl);
1da177e4 616 cl->prio_activity = 0;
1da177e4
LT
617}
618
520ac30f
ED
619static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
620 struct sk_buff **to_free)
1da177e4 621{
3f649ab7 622 int ret;
f6bab199 623 unsigned int len = qdisc_pkt_len(skb);
87990467
SH
624 struct htb_sched *q = qdisc_priv(sch);
625 struct htb_class *cl = htb_classify(skb, sch, &ret);
626
627 if (cl == HTB_DIRECT) {
628 /* enqueue to helper queue */
629 if (q->direct_queue.qlen < q->direct_qlen) {
aea890b8 630 __qdisc_enqueue_tail(skb, &q->direct_queue);
87990467
SH
631 q->direct_pkts++;
632 } else {
520ac30f 633 return qdisc_drop(skb, sch, to_free);
87990467 634 }
1da177e4 635#ifdef CONFIG_NET_CLS_ACT
87990467 636 } else if (!cl) {
c27f339a 637 if (ret & __NET_XMIT_BYPASS)
25331d6c 638 qdisc_qstats_drop(sch);
520ac30f 639 __qdisc_drop(skb, to_free);
87990467 640 return ret;
1da177e4 641#endif
11957be2 642 } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
520ac30f 643 to_free)) != NET_XMIT_SUCCESS) {
378a2f09 644 if (net_xmit_drop_count(ret)) {
25331d6c 645 qdisc_qstats_drop(sch);
338ed9b4 646 cl->drops++;
378a2f09 647 }
69747650 648 return ret;
87990467 649 } else {
87990467
SH
650 htb_activate(q, cl);
651 }
652
f6bab199 653 sch->qstats.backlog += len;
87990467 654 sch->q.qlen++;
87990467 655 return NET_XMIT_SUCCESS;
1da177e4
LT
656}
657
56b765b7 658static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
59e4220a 659{
56b765b7 660 s64 toks = diff + cl->tokens;
59e4220a
JP
661
662 if (toks > cl->buffer)
663 toks = cl->buffer;
292f1c7f 664 toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
59e4220a
JP
665 if (toks <= -cl->mbuffer)
666 toks = 1 - cl->mbuffer;
667
668 cl->tokens = toks;
669}
670
56b765b7 671static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
59e4220a 672{
56b765b7 673 s64 toks = diff + cl->ctokens;
59e4220a
JP
674
675 if (toks > cl->cbuffer)
676 toks = cl->cbuffer;
292f1c7f 677 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
59e4220a
JP
678 if (toks <= -cl->mbuffer)
679 toks = 1 - cl->mbuffer;
680
681 cl->ctokens = toks;
682}
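/* Added summary of the two helpers above (not part of the original source),
 * per dequeued packet:
 *
 *   toks = min(old_tokens + diff, buffer) - psched_l2t_ns(rate, bytes);
 *   if (toks <= -mbuffer)
 *           toks = 1 - mbuffer;
 *
 * i.e. tokens accumulated while idle are capped at the configured burst
 * (buffer/cbuffer), the transmit time of the packet at rate/ceil is
 * subtracted, and the resulting debt is bounded by mbuffer.
 */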
683
1da177e4
LT
684/**
685 * htb_charge_class - charges amount "bytes" to leaf and ancestors
0e5c9084
YK
686 * @q: the priority event queue
687 * @cl: the class to start iterate
688 * @level: the minimum level to account
689 * @skb: the socket buffer
1da177e4
LT
690 *
691 * Routine assumes that packet "bytes" long was dequeued from leaf cl
692 * borrowing from "level". It accounts bytes to ceil leaky bucket for
693 * leaf and all ancestors and to rate bucket for ancestors at levels
694 * "level" and higher. It also handles possible change of mode resulting
695 * from the update. Note that mode can also increase here (MAY_BORROW to
696 * CAN_SEND) because we can use more precise clock that event queue here.
697 * In such case we remove class from event queue first.
698 */
87990467 699static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
c9726d68 700 int level, struct sk_buff *skb)
87990467 701{
0abf77e5 702 int bytes = qdisc_pkt_len(skb);
1da177e4 703 enum htb_cmode old_mode;
56b765b7 704 s64 diff;
1da177e4
LT
705
706 while (cl) {
56b765b7 707 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
1da177e4 708 if (cl->level >= level) {
87990467
SH
709 if (cl->level == level)
710 cl->xstats.lends++;
59e4220a 711 htb_accnt_tokens(cl, bytes, diff);
1da177e4
LT
712 } else {
713 cl->xstats.borrows++;
87990467 714 cl->tokens += diff; /* we moved t_c; update tokens */
1da177e4 715 }
59e4220a 716 htb_accnt_ctokens(cl, bytes, diff);
1da177e4 717 cl->t_c = q->now;
1da177e4 718
87990467
SH
719 old_mode = cl->cmode;
720 diff = 0;
721 htb_change_class_mode(q, cl, &diff);
1da177e4
LT
722 if (old_mode != cl->cmode) {
723 if (old_mode != HTB_CAN_SEND)
c9364636 724 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
1da177e4 725 if (cl->cmode != HTB_CAN_SEND)
87990467 726 htb_add_to_wait_tree(q, cl, diff);
1da177e4 727 }
1da177e4 728
bfe0d029
ED
729 /* update basic stats except for leaves which are already updated */
730 if (cl->level)
731 bstats_update(&cl->bstats, skb);
732
1da177e4
LT
733 cl = cl->parent;
734 }
735}
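/* Added example (illustrative, not from the original source): if a leaf at
 * level 0 dequeues a packet while borrowing from its level-2 grandparent,
 * htb_charge_class() walks leaf -> parent -> grandparent.  The leaf and the
 * parent (below the lending level) only refresh their tokens and pay the
 * ceil bucket, counting a "borrows" event; the grandparent, at the lending
 * level, pays both rate and ceil tokens and counts a "lends" event.  Any
 * resulting mode change re-files the class in the wait tree for its level.
 */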
736
737/**
738 * htb_do_events - make mode changes to classes at the level
2c3ee53e
YK
739 * @q: the priority event queue
740 * @level: which wait_pq in 'q->hlevel'
741 * @start: start jiffies
1da177e4 742 *
fb983d45 743 * Scans event queue for pending events and applies them. Returns time of
1224736d 744 * next pending event (0 for no event in pq, q->now for too many events).
fb983d45 745 * Note: Applied are events whose cl->pq_key <= q->now.
1da177e4 746 */
c9364636 747static s64 htb_do_events(struct htb_sched *q, const int level,
5343a7f8 748 unsigned long start)
1da177e4 749{
8f3ea33a 750 /* don't run for longer than 2 jiffies; 2 is used instead of
cc7ec456
ED
751 * 1 to simplify things when jiffy is going to be incremented
752 * too soon
753 */
a73be040 754 unsigned long stop_at = start + 2;
c9364636
ED
755 struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
756
8f3ea33a 757 while (time_before(jiffies, stop_at)) {
1da177e4 758 struct htb_class *cl;
56b765b7 759 s64 diff;
c9364636 760 struct rb_node *p = rb_first(wait_pq);
30bdbe39 761
87990467
SH
762 if (!p)
763 return 0;
1da177e4
LT
764
765 cl = rb_entry(p, struct htb_class, pq_node);
fb983d45
PM
766 if (cl->pq_key > q->now)
767 return cl->pq_key;
768
c9364636 769 htb_safe_rb_erase(p, wait_pq);
56b765b7 770 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
87990467 771 htb_change_class_mode(q, cl, &diff);
1da177e4 772 if (cl->cmode != HTB_CAN_SEND)
87990467 773 htb_add_to_wait_tree(q, cl, diff);
1da177e4 774 }
1224736d
JP
775
776 /* too much load - let's continue after a break for scheduling */
e82181de 777 if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
c17988a9 778 pr_warn("htb: too many events!\n");
e82181de
JP
779 q->warned |= HTB_WARN_TOOMANYEVENTS;
780 }
1224736d
JP
781
782 return q->now;
1da177e4
LT
783}
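/* Added note with a small example: if the level-0 wait_pq holds classes whose
 * pq_key lies 5 ms and 9 ms in the future, htb_do_events() applies nothing
 * and returns the 5 ms key, which htb_dequeue() caches in near_ev_cache[0]
 * and uses to arm the watchdog; a return of 0 (empty queue) is turned into
 * q->now + 1 s there, and a return of q->now (the ~2 jiffies budget above was
 * exceeded) makes htb_dequeue() fall back to schedule_work() instead of the
 * hrtimer watchdog.
 */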
784
785/* Returns class->node+prio from id-tree where class's id is >= id. NULL
cc7ec456
ED
 786 * if no such one exists.
787 */
87990467
SH
788static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
789 u32 id)
1da177e4
LT
790{
791 struct rb_node *r = NULL;
792 while (n) {
87990467
SH
793 struct htb_class *cl =
794 rb_entry(n, struct htb_class, node[prio]);
87990467 795
f4c1f3e0 796 if (id > cl->common.classid) {
1da177e4 797 n = n->rb_right;
1b5c0077 798 } else if (id < cl->common.classid) {
1da177e4
LT
799 r = n;
800 n = n->rb_left;
1b5c0077
JP
801 } else {
802 return n;
1da177e4
LT
803 }
804 }
805 return r;
806}
807
808/**
809 * htb_lookup_leaf - returns next leaf class in DRR order
9977d6f5
YK
810 * @hprio: the current one
811 * @prio: which prio in class
1da177e4
LT
812 *
 813 * Find the leaf the current feed pointer points to.
814 */
c9364636 815static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
1da177e4
LT
816{
817 int i;
818 struct {
819 struct rb_node *root;
820 struct rb_node **pptr;
821 u32 *pid;
87990467
SH
822 } stk[TC_HTB_MAXDEPTH], *sp = stk;
823
c9364636
ED
824 BUG_ON(!hprio->row.rb_node);
825 sp->root = hprio->row.rb_node;
826 sp->pptr = &hprio->ptr;
827 sp->pid = &hprio->last_ptr_id;
1da177e4
LT
828
829 for (i = 0; i < 65535; i++) {
87990467 830 if (!*sp->pptr && *sp->pid) {
10297b99 831 /* ptr was invalidated but id is valid - try to recover
cc7ec456
ED
832 * the original or next ptr
833 */
87990467
SH
834 *sp->pptr =
835 htb_id_find_next_upper(prio, sp->root, *sp->pid);
1da177e4 836 }
87990467 837 *sp->pid = 0; /* ptr is valid now so that remove this hint as it
cc7ec456
ED
838 * can become out of date quickly
839 */
87990467 840 if (!*sp->pptr) { /* we are at right end; rewind & go up */
1da177e4 841 *sp->pptr = sp->root;
87990467 842 while ((*sp->pptr)->rb_left)
1da177e4
LT
843 *sp->pptr = (*sp->pptr)->rb_left;
844 if (sp > stk) {
845 sp--;
512bb43e
JP
846 if (!*sp->pptr) {
847 WARN_ON(1);
87990467 848 return NULL;
512bb43e 849 }
87990467 850 htb_next_rb_node(sp->pptr);
1da177e4
LT
851 }
852 } else {
853 struct htb_class *cl;
c9364636
ED
854 struct htb_prio *clp;
855
87990467
SH
856 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
857 if (!cl->level)
1da177e4 858 return cl;
11957be2 859 clp = &cl->inner.clprio[prio];
c9364636
ED
860 (++sp)->root = clp->feed.rb_node;
861 sp->pptr = &clp->ptr;
862 sp->pid = &clp->last_ptr_id;
1da177e4
LT
863 }
864 }
547b792c 865 WARN_ON(1);
1da177e4
LT
866 return NULL;
867}
868
869/* dequeues packet at given priority and level; call only if
cc7ec456
ED
870 * you are sure that there is active class at prio/level
871 */
c9364636
ED
872static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
873 const int level)
1da177e4
LT
874{
875 struct sk_buff *skb = NULL;
87990467 876 struct htb_class *cl, *start;
c9364636
ED
877 struct htb_level *hlevel = &q->hlevel[level];
878 struct htb_prio *hprio = &hlevel->hprio[prio];
879
1da177e4 880 /* look initial class up in the row */
c9364636 881 start = cl = htb_lookup_leaf(hprio, prio);
87990467 882
1da177e4
LT
883 do {
884next:
512bb43e 885 if (unlikely(!cl))
87990467 886 return NULL;
1da177e4
LT
887
888 /* class can be empty - it is unlikely but can be true if leaf
cc7ec456
ED
889 * qdisc drops packets in enqueue routine or if someone used
890 * graft operation on the leaf since last dequeue;
891 * simply deactivate and skip such class
892 */
11957be2 893 if (unlikely(cl->leaf.q->q.qlen == 0)) {
1da177e4 894 struct htb_class *next;
87990467 895 htb_deactivate(q, cl);
1da177e4
LT
896
897 /* row/level might become empty */
898 if ((q->row_mask[level] & (1 << prio)) == 0)
87990467 899 return NULL;
1da177e4 900
c9364636 901 next = htb_lookup_leaf(hprio, prio);
87990467
SH
902
903 if (cl == start) /* fix start if we just deleted it */
1da177e4
LT
904 start = next;
905 cl = next;
906 goto next;
907 }
87990467 908
11957be2 909 skb = cl->leaf.q->dequeue(cl->leaf.q);
87990467 910 if (likely(skb != NULL))
1da177e4 911 break;
633fe66e 912
11957be2
CW
913 qdisc_warn_nonwc("htb", cl->leaf.q);
914 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
c9364636
ED
915 &q->hlevel[0].hprio[prio].ptr);
916 cl = htb_lookup_leaf(hprio, prio);
1da177e4
LT
917
918 } while (cl != start);
919
920 if (likely(skb != NULL)) {
196d97f6 921 bstats_update(&cl->bstats, skb);
11957be2
CW
922 cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
923 if (cl->leaf.deficit[level] < 0) {
924 cl->leaf.deficit[level] += cl->quantum;
925 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
c9364636 926 &q->hlevel[0].hprio[prio].ptr);
1da177e4
LT
927 }
 928 /* this used to be after charge_class but this constellation
cc7ec456
ED
929 * gives us slightly better performance
930 */
11957be2 931 if (!cl->leaf.q->q.qlen)
87990467 932 htb_deactivate(q, cl);
c9726d68 933 htb_charge_class(q, cl, level, skb);
1da177e4
LT
934 }
935 return skb;
936}
937
1da177e4
LT
938static struct sk_buff *htb_dequeue(struct Qdisc *sch)
939{
9190b3b3 940 struct sk_buff *skb;
1da177e4
LT
941 struct htb_sched *q = qdisc_priv(sch);
942 int level;
5343a7f8 943 s64 next_event;
a73be040 944 unsigned long start_at;
1da177e4
LT
945
946 /* try to dequeue direct packets as high prio (!) to minimize cpu work */
48da34b7 947 skb = __qdisc_dequeue_head(&q->direct_queue);
87990467 948 if (skb != NULL) {
9190b3b3
ED
949ok:
950 qdisc_bstats_update(sch, skb);
431e3a8e 951 qdisc_qstats_backlog_dec(sch, skb);
1da177e4
LT
952 sch->q.qlen--;
953 return skb;
954 }
955
87990467
SH
956 if (!sch->q.qlen)
957 goto fin;
d2de875c 958 q->now = ktime_get_ns();
a73be040 959 start_at = jiffies;
1da177e4 960
d2fe85da 961 next_event = q->now + 5LLU * NSEC_PER_SEC;
633fe66e 962
1da177e4
LT
963 for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
964 /* common case optimization - skip event handler quickly */
965 int m;
c9364636 966 s64 event = q->near_ev_cache[level];
fb983d45 967
c9364636 968 if (q->now >= event) {
a73be040 969 event = htb_do_events(q, level, start_at);
2e4b3b0e 970 if (!event)
56b765b7 971 event = q->now + NSEC_PER_SEC;
2e4b3b0e 972 q->near_ev_cache[level] = event;
c9364636 973 }
fb983d45 974
c0851347 975 if (next_event > event)
fb983d45 976 next_event = event;
87990467 977
1da177e4
LT
978 m = ~q->row_mask[level];
979 while (m != (int)(-1)) {
87990467 980 int prio = ffz(m);
cc7ec456 981
1da177e4 982 m |= 1 << prio;
87990467 983 skb = htb_dequeue_tree(q, prio, level);
9190b3b3
ED
984 if (likely(skb != NULL))
985 goto ok;
1da177e4
LT
986 }
987 }
a9efad8b 988 if (likely(next_event > q->now))
45f50bed 989 qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
a9efad8b 990 else
1224736d 991 schedule_work(&q->work);
1da177e4 992fin:
1da177e4
LT
993 return skb;
994}
995
1da177e4
LT
996/* reset all classes */
997/* always called under BH & queue lock */
87990467 998static void htb_reset(struct Qdisc *sch)
1da177e4
LT
999{
1000 struct htb_sched *q = qdisc_priv(sch);
f4c1f3e0 1001 struct htb_class *cl;
f4c1f3e0 1002 unsigned int i;
0cef296d 1003
f4c1f3e0 1004 for (i = 0; i < q->clhash.hashsize; i++) {
b67bfe0d 1005 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1da177e4 1006 if (cl->level)
11957be2 1007 memset(&cl->inner, 0, sizeof(cl->inner));
1da177e4 1008 else {
d03b195b 1009 if (cl->leaf.q && !q->offload)
11957be2 1010 qdisc_reset(cl->leaf.q);
1da177e4
LT
1011 }
1012 cl->prio_activity = 0;
1013 cl->cmode = HTB_CAN_SEND;
1da177e4
LT
1014 }
1015 }
fb983d45 1016 qdisc_watchdog_cancel(&q->watchdog);
a5a9f534 1017 __qdisc_reset_queue(&q->direct_queue);
c9364636 1018 memset(q->hlevel, 0, sizeof(q->hlevel));
87990467 1019 memset(q->row_mask, 0, sizeof(q->row_mask));
1da177e4
LT
1020}
1021
27a3421e
PM
1022static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
1023 [TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
1024 [TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
1025 [TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1026 [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
6906f4ed 1027 [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
df62cdf3
ED
1028 [TCA_HTB_RATE64] = { .type = NLA_U64 },
1029 [TCA_HTB_CEIL64] = { .type = NLA_U64 },
d03b195b 1030 [TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
27a3421e
PM
1031};
1032
1224736d
JP
1033static void htb_work_func(struct work_struct *work)
1034{
1035 struct htb_sched *q = container_of(work, struct htb_sched, work);
1036 struct Qdisc *sch = q->watchdog.qdisc;
1037
0ee13627 1038 rcu_read_lock();
1224736d 1039 __netif_schedule(qdisc_root(sch));
0ee13627 1040 rcu_read_unlock();
1224736d
JP
1041}
1042
d03b195b
MM
1043static void htb_set_lockdep_class_child(struct Qdisc *q)
1044{
1045 static struct lock_class_key child_key;
1046
1047 lockdep_set_class(qdisc_lock(q), &child_key);
1048}
1049
1050static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
1051{
1052 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
1053}
1054
e63d7dfd
AA
1055static int htb_init(struct Qdisc *sch, struct nlattr *opt,
1056 struct netlink_ext_ack *extack)
1da177e4 1057{
d03b195b
MM
1058 struct net_device *dev = qdisc_dev(sch);
1059 struct tc_htb_qopt_offload offload_opt;
1da177e4 1060 struct htb_sched *q = qdisc_priv(sch);
6906f4ed 1061 struct nlattr *tb[TCA_HTB_MAX + 1];
1da177e4 1062 struct tc_htb_glob *gopt;
d03b195b 1063 unsigned int ntx;
fb3a3e37 1064 bool offload;
cee63723 1065 int err;
cee63723 1066
88c2ace6
NA
1067 qdisc_watchdog_init(&q->watchdog, sch);
1068 INIT_WORK(&q->work, htb_work_func);
1069
cee63723
PM
1070 if (!opt)
1071 return -EINVAL;
1072
8d1a77f9 1073 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
6529eaba
JP
1074 if (err)
1075 return err;
1076
8cb08174
JB
1077 err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1078 NULL);
cee63723
PM
1079 if (err < 0)
1080 return err;
1081
6906f4ed 1082 if (!tb[TCA_HTB_INIT])
1da177e4 1083 return -EINVAL;
6906f4ed 1084
1e90474c 1085 gopt = nla_data(tb[TCA_HTB_INIT]);
6906f4ed 1086 if (gopt->version != HTB_VER >> 16)
1da177e4 1087 return -EINVAL;
1da177e4 1088
fb3a3e37 1089 offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
d03b195b 1090
fb3a3e37 1091 if (offload) {
648a991c
MM
1092 if (sch->parent != TC_H_ROOT) {
1093 NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
d03b195b 1094 return -EOPNOTSUPP;
648a991c 1095 }
d03b195b 1096
648a991c
MM
1097 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
1098 NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");
d03b195b 1099 return -EOPNOTSUPP;
648a991c 1100 }
d03b195b
MM
1101
1102 q->num_direct_qdiscs = dev->real_num_tx_queues;
1103 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
1104 sizeof(*q->direct_qdiscs),
1105 GFP_KERNEL);
1106 if (!q->direct_qdiscs)
1107 return -ENOMEM;
1108 }
1109
f4c1f3e0
PM
1110 err = qdisc_class_hash_init(&q->clhash);
1111 if (err < 0)
d59f4e1d 1112 return err;
1da177e4 1113
6906f4ed
ED
1114 if (tb[TCA_HTB_DIRECT_QLEN])
1115 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
348e3435 1116 else
6906f4ed 1117 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
348e3435 1118
1da177e4
LT
1119 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1120 q->rate2quantum = 1;
1121 q->defcls = gopt->defcls;
1122
fb3a3e37 1123 if (!offload)
d03b195b
MM
1124 return 0;
1125
1126 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1127 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1128 struct Qdisc *qdisc;
1129
1130 qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1131 TC_H_MAKE(sch->handle, 0), extack);
1132 if (!qdisc) {
d59f4e1d 1133 return -ENOMEM;
d03b195b
MM
1134 }
1135
1136 htb_set_lockdep_class_child(qdisc);
1137 q->direct_qdiscs[ntx] = qdisc;
1138 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1139 }
1140
1141 sch->flags |= TCQ_F_MQROOT;
1142
1143 offload_opt = (struct tc_htb_qopt_offload) {
1144 .command = TC_HTB_CREATE,
1145 .parent_classid = TC_H_MAJ(sch->handle) >> 16,
1146 .classid = TC_H_MIN(q->defcls),
1147 .extack = extack,
1148 };
1149 err = htb_offload(dev, &offload_opt);
1150 if (err)
d59f4e1d 1151 return err;
d03b195b 1152
fb3a3e37
MM
1153 /* Defer this assignment, so that htb_destroy skips offload-related
1154 * parts (especially calling ndo_setup_tc) on errors.
1155 */
1156 q->offload = true;
1157
1da177e4 1158 return 0;
d03b195b
MM
1159}
1160
1161static void htb_attach_offload(struct Qdisc *sch)
1162{
1163 struct net_device *dev = qdisc_dev(sch);
1164 struct htb_sched *q = qdisc_priv(sch);
1165 unsigned int ntx;
1166
1167 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1168 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
1169
1170 old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1171 qdisc_put(old);
1172 qdisc_hash_add(qdisc, false);
1173 }
1174 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
1175 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1176 struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
1177
1178 qdisc_put(old);
1179 }
1180
1181 kfree(q->direct_qdiscs);
1182 q->direct_qdiscs = NULL;
1183}
1184
1185static void htb_attach_software(struct Qdisc *sch)
1186{
1187 struct net_device *dev = qdisc_dev(sch);
1188 unsigned int ntx;
1189
1190 /* Resemble qdisc_graft behavior. */
1191 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1192 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1193 struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
1194
1195 qdisc_refcount_inc(sch);
1196
1197 qdisc_put(old);
1198 }
1199}
1200
1201static void htb_attach(struct Qdisc *sch)
1202{
1203 struct htb_sched *q = qdisc_priv(sch);
1204
1205 if (q->offload)
1206 htb_attach_offload(sch);
1207 else
1208 htb_attach_software(sch);
1da177e4
LT
1209}
1210
1211static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1212{
1213 struct htb_sched *q = qdisc_priv(sch);
4b3550ef 1214 struct nlattr *nest;
1da177e4 1215 struct tc_htb_glob gopt;
4b3550ef 1216
d03b195b
MM
1217 if (q->offload)
1218 sch->flags |= TCQ_F_OFFLOADED;
1219 else
1220 sch->flags &= ~TCQ_F_OFFLOADED;
1221
b362487a 1222 sch->qstats.overlimits = q->overlimits;
6f542efc
ED
 1223 /* It's safe not to acquire the qdisc lock. As we hold RTNL,
1224 * no change can happen on the qdisc parameters.
1225 */
1da177e4 1226
4b3550ef 1227 gopt.direct_pkts = q->direct_pkts;
1da177e4
LT
1228 gopt.version = HTB_VER;
1229 gopt.rate2quantum = q->rate2quantum;
1230 gopt.defcls = q->defcls;
3bf72957 1231 gopt.debug = 0;
4b3550ef 1232
ae0be8de 1233 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
4b3550ef
PM
1234 if (nest == NULL)
1235 goto nla_put_failure;
6906f4ed
ED
1236 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1237 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1b34ec43 1238 goto nla_put_failure;
d03b195b
MM
1239 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1240 goto nla_put_failure;
4b3550ef 1241
6f542efc 1242 return nla_nest_end(skb, nest);
4b3550ef 1243
1e90474c 1244nla_put_failure:
4b3550ef 1245 nla_nest_cancel(skb, nest);
1da177e4
LT
1246 return -1;
1247}
1248
1249static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
87990467 1250 struct sk_buff *skb, struct tcmsg *tcm)
1da177e4 1251{
87990467 1252 struct htb_class *cl = (struct htb_class *)arg;
83271586 1253 struct htb_sched *q = qdisc_priv(sch);
4b3550ef 1254 struct nlattr *nest;
1da177e4
LT
1255 struct tc_htb_opt opt;
1256
6f542efc
ED
 1257 /* It's safe not to acquire the qdisc lock. As we hold RTNL,
1258 * no change can happen on the class parameters.
1259 */
f4c1f3e0
PM
1260 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1261 tcm->tcm_handle = cl->common.classid;
11957be2
CW
1262 if (!cl->level && cl->leaf.q)
1263 tcm->tcm_info = cl->leaf.q->handle;
1da177e4 1264
ae0be8de 1265 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
4b3550ef
PM
1266 if (nest == NULL)
1267 goto nla_put_failure;
1da177e4 1268
87990467 1269 memset(&opt, 0, sizeof(opt));
1da177e4 1270
01cb71d2 1271 psched_ratecfg_getrate(&opt.rate, &cl->rate);
9c10f411 1272 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
01cb71d2 1273 psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
9c10f411 1274 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
c19f7a34
JP
1275 opt.quantum = cl->quantum;
1276 opt.prio = cl->prio;
87990467 1277 opt.level = cl->level;
1b34ec43
DM
1278 if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1279 goto nla_put_failure;
83271586
MM
1280 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1281 goto nla_put_failure;
df62cdf3 1282 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
2a51c1e8
ND
1283 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1284 TCA_HTB_PAD))
df62cdf3
ED
1285 goto nla_put_failure;
1286 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
2a51c1e8
ND
1287 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1288 TCA_HTB_PAD))
df62cdf3 1289 goto nla_put_failure;
4b3550ef 1290
6f542efc 1291 return nla_nest_end(skb, nest);
4b3550ef 1292
1e90474c 1293nla_put_failure:
4b3550ef 1294 nla_nest_cancel(skb, nest);
1da177e4
LT
1295 return -1;
1296}
1297
83271586
MM
1298static void htb_offload_aggregate_stats(struct htb_sched *q,
1299 struct htb_class *cl)
1300{
f56940da 1301 u64 bytes = 0, packets = 0;
83271586
MM
1302 struct htb_class *c;
1303 unsigned int i;
1304
50dc9a85 1305 gnet_stats_basic_sync_init(&cl->bstats);
83271586
MM
1306
1307 for (i = 0; i < q->clhash.hashsize; i++) {
1308 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
1309 struct htb_class *p = c;
1310
1311 while (p && p->level < cl->level)
1312 p = p->parent;
1313
1314 if (p != cl)
1315 continue;
1316
50dc9a85
AD
1317 bytes += u64_stats_read(&c->bstats_bias.bytes);
1318 packets += u64_stats_read(&c->bstats_bias.packets);
83271586 1319 if (c->level == 0) {
50dc9a85
AD
1320 bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
1321 packets += u64_stats_read(&c->leaf.q->bstats.packets);
83271586
MM
1322 }
1323 }
1324 }
f56940da 1325 _bstats_update(&cl->bstats, bytes, packets);
83271586
MM
1326}
1327
1da177e4 1328static int
87990467 1329htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1da177e4 1330{
87990467 1331 struct htb_class *cl = (struct htb_class *)arg;
83271586 1332 struct htb_sched *q = qdisc_priv(sch);
338ed9b4
ED
1333 struct gnet_stats_queue qs = {
1334 .drops = cl->drops,
3c75f6ee 1335 .overlimits = cl->overlimits,
338ed9b4 1336 };
64015853 1337 __u32 qlen = 0;
1da177e4 1338
5dd431b6
PA
1339 if (!cl->level && cl->leaf.q)
1340 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1341
0564bf0a
KK
1342 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1343 INT_MIN, INT_MAX);
1344 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1345 INT_MIN, INT_MAX);
1da177e4 1346
83271586
MM
1347 if (q->offload) {
1348 if (!cl->level) {
1349 if (cl->leaf.q)
1350 cl->bstats = cl->leaf.q->bstats;
1351 else
50dc9a85 1352 gnet_stats_basic_sync_init(&cl->bstats);
f56940da 1353 _bstats_update(&cl->bstats,
50dc9a85
AD
1354 u64_stats_read(&cl->bstats_bias.bytes),
1355 u64_stats_read(&cl->bstats_bias.packets));
83271586
MM
1356 } else {
1357 htb_offload_aggregate_stats(q, cl);
1358 }
1359 }
1360
29cbcd85 1361 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
1c0d32fd 1362 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
338ed9b4 1363 gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
1da177e4
LT
1364 return -1;
1365
1366 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1367}
1368
d03b195b
MM
1369static struct netdev_queue *
1370htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
1371{
1372 struct net_device *dev = qdisc_dev(sch);
1373 struct tc_htb_qopt_offload offload_opt;
93bde210 1374 struct htb_sched *q = qdisc_priv(sch);
d03b195b
MM
1375 int err;
1376
93bde210
MM
1377 if (!q->offload)
1378 return sch->dev_queue;
1379
d03b195b
MM
1380 offload_opt = (struct tc_htb_qopt_offload) {
1381 .command = TC_HTB_LEAF_QUERY_QUEUE,
1382 .classid = TC_H_MIN(tcm->tcm_parent),
1383 };
1384 err = htb_offload(dev, &offload_opt);
1385 if (err || offload_opt.qid >= dev->num_tx_queues)
1386 return NULL;
1387 return netdev_get_tx_queue(dev, offload_opt.qid);
1388}
1389
1390static struct Qdisc *
1391htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
1392{
1393 struct net_device *dev = dev_queue->dev;
1394 struct Qdisc *old_q;
1395
1396 if (dev->flags & IFF_UP)
1397 dev_deactivate(dev);
1398 old_q = dev_graft_qdisc(dev_queue, new_q);
1399 if (new_q)
1400 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1401 if (dev->flags & IFF_UP)
1402 dev_activate(dev);
1403
1404 return old_q;
1405}
1406
ca49bfd9
MM
1407static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
1408{
1409 struct netdev_queue *queue;
1410
1411 queue = cl->leaf.offload_queue;
1412 if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
1413 WARN_ON(cl->leaf.q->dev_queue != queue);
1414
1415 return queue;
1416}
1417
1418static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
1419 struct htb_class *cl_new, bool destroying)
d03b195b
MM
1420{
1421 struct netdev_queue *queue_old, *queue_new;
1422 struct net_device *dev = qdisc_dev(sch);
d03b195b 1423
ca49bfd9
MM
1424 queue_old = htb_offload_get_queue(cl_old);
1425 queue_new = htb_offload_get_queue(cl_new);
d03b195b 1426
ca49bfd9
MM
1427 if (!destroying) {
1428 struct Qdisc *qdisc;
d03b195b 1429
ca49bfd9
MM
1430 if (dev->flags & IFF_UP)
1431 dev_deactivate(dev);
1432 qdisc = dev_graft_qdisc(queue_old, NULL);
1433 WARN_ON(qdisc != cl_old->leaf.q);
1434 }
1435
1436 if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
1437 cl_old->leaf.q->dev_queue = queue_new;
1438 cl_old->leaf.offload_queue = queue_new;
1439
1440 if (!destroying) {
1441 struct Qdisc *qdisc;
1442
1443 qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
1444 if (dev->flags & IFF_UP)
1445 dev_activate(dev);
1446 WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
1447 }
d03b195b
MM
1448}
1449
1da177e4 1450static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
653d6fd6 1451 struct Qdisc **old, struct netlink_ext_ack *extack)
1da177e4 1452{
d03b195b 1453 struct netdev_queue *dev_queue = sch->dev_queue;
87990467 1454 struct htb_class *cl = (struct htb_class *)arg;
d03b195b
MM
1455 struct htb_sched *q = qdisc_priv(sch);
1456 struct Qdisc *old_q;
1da177e4 1457
5b9a9ccf
PM
1458 if (cl->level)
1459 return -EINVAL;
d03b195b 1460
ca49bfd9
MM
1461 if (q->offload)
1462 dev_queue = htb_offload_get_queue(cl);
d03b195b
MM
1463
1464 if (!new) {
1465 new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1466 cl->common.classid, extack);
1467 if (!new)
1468 return -ENOBUFS;
1469 }
1470
1471 if (q->offload) {
1472 htb_set_lockdep_class_child(new);
1473 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1474 qdisc_refcount_inc(new);
1475 old_q = htb_graft_helper(dev_queue, new);
1476 }
5b9a9ccf 1477
11957be2 1478 *old = qdisc_replace(sch, new, &cl->leaf.q);
d03b195b
MM
1479
1480 if (q->offload) {
1481 WARN_ON(old_q != *old);
1482 qdisc_put(old_q);
1483 }
1484
5b9a9ccf 1485 return 0;
1da177e4
LT
1486}
1487
87990467 1488static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1da177e4 1489{
87990467 1490 struct htb_class *cl = (struct htb_class *)arg;
11957be2 1491 return !cl->level ? cl->leaf.q : NULL;
1da177e4
LT
1492}
1493
256d61b8
PM
1494static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1495{
1496 struct htb_class *cl = (struct htb_class *)arg;
1497
95946658 1498 htb_deactivate(qdisc_priv(sch), cl);
256d61b8
PM
1499}
1500
160d5e10
JP
1501static inline int htb_parent_last_child(struct htb_class *cl)
1502{
1503 if (!cl->parent)
1504 /* the root class */
1505 return 0;
42077599 1506 if (cl->parent->children > 1)
160d5e10
JP
1507 /* not the last child */
1508 return 0;
160d5e10
JP
1509 return 1;
1510}
1511
d03b195b 1512static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
3ba08b00 1513 struct Qdisc *new_q)
160d5e10 1514{
d03b195b 1515 struct htb_sched *q = qdisc_priv(sch);
160d5e10
JP
1516 struct htb_class *parent = cl->parent;
1517
11957be2 1518 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
160d5e10 1519
3ba08b00 1520 if (parent->cmode != HTB_CAN_SEND)
c9364636
ED
1521 htb_safe_rb_erase(&parent->pq_node,
1522 &q->hlevel[parent->level].wait_pq);
3ba08b00 1523
160d5e10 1524 parent->level = 0;
11957be2
CW
1525 memset(&parent->inner, 0, sizeof(parent->inner));
1526 parent->leaf.q = new_q ? new_q : &noop_qdisc;
160d5e10
JP
1527 parent->tokens = parent->buffer;
1528 parent->ctokens = parent->cbuffer;
d2de875c 1529 parent->t_c = ktime_get_ns();
160d5e10 1530 parent->cmode = HTB_CAN_SEND;
ca49bfd9
MM
1531 if (q->offload)
1532 parent->leaf.offload_queue = cl->leaf.offload_queue;
160d5e10
JP
1533}
1534
d03b195b
MM
1535static void htb_parent_to_leaf_offload(struct Qdisc *sch,
1536 struct netdev_queue *dev_queue,
1537 struct Qdisc *new_q)
1538{
1539 struct Qdisc *old_q;
1540
1541 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
944d671d
YW
1542 if (new_q)
1543 qdisc_refcount_inc(new_q);
d03b195b
MM
1544 old_q = htb_graft_helper(dev_queue, new_q);
1545 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1546}
1547
1548static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
1549 bool last_child, bool destroying,
1550 struct netlink_ext_ack *extack)
1551{
1552 struct tc_htb_qopt_offload offload_opt;
ca49bfd9 1553 struct netdev_queue *dev_queue;
d03b195b 1554 struct Qdisc *q = cl->leaf.q;
a22b7388 1555 struct Qdisc *old;
d03b195b
MM
1556 int err;
1557
1558 if (cl->level)
1559 return -EINVAL;
1560
1561 WARN_ON(!q);
ca49bfd9 1562 dev_queue = htb_offload_get_queue(cl);
a22b7388
RR
1563 /* When destroying, caller qdisc_graft grafts the new qdisc and invokes
1564 * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
1565 * does not need to graft or qdisc_put the qdisc being destroyed.
1566 */
1567 if (!destroying) {
1568 old = htb_graft_helper(dev_queue, NULL);
1569 /* Last qdisc grafted should be the same as cl->leaf.q when
1570 * calling htb_delete.
d03b195b 1571 */
d03b195b 1572 WARN_ON(old != q);
a22b7388 1573 }
d03b195b 1574
83271586 1575 if (cl->parent) {
f56940da 1576 _bstats_update(&cl->parent->bstats_bias,
50dc9a85
AD
1577 u64_stats_read(&q->bstats.bytes),
1578 u64_stats_read(&q->bstats.packets));
83271586
MM
1579 }
1580
d03b195b
MM
1581 offload_opt = (struct tc_htb_qopt_offload) {
1582 .command = !last_child ? TC_HTB_LEAF_DEL :
1583 destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
1584 TC_HTB_LEAF_DEL_LAST,
1585 .classid = cl->common.classid,
1586 .extack = extack,
1587 };
1588 err = htb_offload(qdisc_dev(sch), &offload_opt);
1589
a22b7388
RR
1590 if (!destroying) {
1591 if (!err)
1592 qdisc_put(old);
1593 else
1594 htb_graft_helper(dev_queue, old);
1595 }
d03b195b
MM
1596
1597 if (last_child)
1598 return err;
1599
ca49bfd9
MM
1600 if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
1601 u32 classid = TC_H_MAJ(sch->handle) |
1602 TC_H_MIN(offload_opt.classid);
1603 struct htb_class *moved_cl = htb_find(classid, sch);
1604
1605 htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
d03b195b
MM
1606 }
1607
1608 return err;
1609}
1610
87990467 1611static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1da177e4 1612{
1da177e4 1613 if (!cl->level) {
11957be2 1614 WARN_ON(!cl->leaf.q);
86bd446b 1615 qdisc_put(cl->leaf.q);
1da177e4 1616 }
1c0d32fd 1617 gen_kill_estimator(&cl->rate_est);
6529eaba 1618 tcf_block_put(cl->block);
1da177e4
LT
1619 kfree(cl);
1620}
1621
87990467 1622static void htb_destroy(struct Qdisc *sch)
1da177e4 1623{
d03b195b
MM
1624 struct net_device *dev = qdisc_dev(sch);
1625 struct tc_htb_qopt_offload offload_opt;
1da177e4 1626 struct htb_sched *q = qdisc_priv(sch);
b67bfe0d 1627 struct hlist_node *next;
d03b195b 1628 bool nonempty, changed;
fbd8f137
PM
1629 struct htb_class *cl;
1630 unsigned int i;
1da177e4 1631
1224736d 1632 cancel_work_sync(&q->work);
fb983d45 1633 qdisc_watchdog_cancel(&q->watchdog);
1da177e4 1634 /* This line used to be after htb_destroy_class call below
cc7ec456
ED
1635 * and surprisingly it worked in 2.4. But it must precede it
 1636 * because filters need their target class alive to be able to call
1637 * unbind_filter on it (without Oops).
1638 */
6529eaba 1639 tcf_block_put(q->block);
87990467 1640
f4c1f3e0 1641 for (i = 0; i < q->clhash.hashsize; i++) {
89890422 1642 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
6529eaba 1643 tcf_block_put(cl->block);
89890422
KK
1644 cl->block = NULL;
1645 }
fbd8f137 1646 }
d03b195b
MM
1647
1648 do {
1649 nonempty = false;
1650 changed = false;
1651 for (i = 0; i < q->clhash.hashsize; i++) {
1652 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1653 common.hnode) {
1654 bool last_child;
1655
1656 if (!q->offload) {
1657 htb_destroy_class(sch, cl);
1658 continue;
1659 }
1660
1661 nonempty = true;
1662
1663 if (cl->level)
1664 continue;
1665
1666 changed = true;
1667
1668 last_child = htb_parent_last_child(cl);
1669 htb_destroy_class_offload(sch, cl, last_child,
1670 true, NULL);
1671 qdisc_class_hash_remove(&q->clhash,
1672 &cl->common);
1673 if (cl->parent)
1674 cl->parent->children--;
1675 if (last_child)
1676 htb_parent_to_leaf(sch, cl, NULL);
1677 htb_destroy_class(sch, cl);
1678 }
1679 }
1680 } while (changed);
1681 WARN_ON(nonempty);
1682
f4c1f3e0 1683 qdisc_class_hash_destroy(&q->clhash);
a5a9f534 1684 __qdisc_reset_queue(&q->direct_queue);
d03b195b 1685
1686 if (q->offload) {
1687 offload_opt = (struct tc_htb_qopt_offload) {
1688 .command = TC_HTB_DESTROY,
1689 };
1690 htb_offload(dev, &offload_opt);
1691 }
1692
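 /* Drop the references to the direct qdiscs set up for offload mode, if
 * any, and free the array.
 */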
1693 if (!q->direct_qdiscs)
1694 return;
1695 for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
1696 qdisc_put(q->direct_qdiscs[i]);
1697 kfree(q->direct_qdiscs);
1698}
1699
1700static int htb_delete(struct Qdisc *sch, unsigned long arg,
1701 struct netlink_ext_ack *extack)
1702{
1703 struct htb_sched *q = qdisc_priv(sch);
87990467 1704 struct htb_class *cl = (struct htb_class *)arg;
1705 struct Qdisc *new_q = NULL;
1706 int last_child = 0;
d03b195b 1707 int err;
1da177e4 1708
 1709 /* TODO: why don't we allow deleting a subtree? references? does the
 1710 * tc subsys guarantee us that in htb_destroy it holds no class
 1711 * refs, so that we can remove children safely there?
1712 */
42077599 1713 if (cl->children || cl->filter_cnt)
1da177e4 1714 return -EBUSY;
87990467 1715
1716 if (!cl->level && htb_parent_last_child(cl))
1717 last_child = 1;
1718
1719 if (q->offload) {
1720 err = htb_destroy_class_offload(sch, cl, last_child, false,
1721 extack);
1722 if (err)
1723 return err;
1724 }
1725
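 /* If this was the parent's last child, the parent is about to become a
 * leaf again, so prepare a default pfifo for it up front.
 */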
1726 if (last_child) {
1727 struct netdev_queue *dev_queue = sch->dev_queue;
1728
1729 if (q->offload)
1730 dev_queue = htb_offload_get_queue(cl);
d03b195b 1731
d03b195b 1732 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1733 cl->parent->common.classid,
1734 NULL);
d03b195b 1735 if (q->offload) {
944d671d 1736 if (new_q)
d03b195b 1737 htb_set_lockdep_class_child(new_q);
944d671d 1738 htb_parent_to_leaf_offload(sch, dev_queue, new_q);
d03b195b 1739 }
1740 }
1741
1da177e4 1742 sch_tree_lock(sch);
87990467 1743
1744 if (!cl->level)
1745 qdisc_purge_queue(cl->leaf.q);
814a175e 1746
1747 /* delete from hash and active; remainder in destroy_class */
1748 qdisc_class_hash_remove(&q->clhash, &cl->common);
1749 if (cl->parent)
1750 cl->parent->children--;
c38c83cb 1751
1da177e4 1752 if (cl->prio_activity)
87990467 1753 htb_deactivate(q, cl);
1da177e4 1754
fbd8f137 1755 if (cl->cmode != HTB_CAN_SEND)
1756 htb_safe_rb_erase(&cl->pq_node,
1757 &q->hlevel[cl->level].wait_pq);
fbd8f137 1758
160d5e10 1759 if (last_child)
d03b195b 1760 htb_parent_to_leaf(sch, cl, new_q);
160d5e10 1761
1da177e4 1762 sch_tree_unlock(sch);
1da177e4 1763
1764 htb_destroy_class(sch, cl);
1765 return 0;
1766}
1767
87990467 1768static int htb_change_class(struct Qdisc *sch, u32 classid,
1e90474c 1769 u32 parentid, struct nlattr **tca,
793d81d6 1770 unsigned long *arg, struct netlink_ext_ack *extack)
1771{
1772 int err = -EINVAL;
1773 struct htb_sched *q = qdisc_priv(sch);
87990467 1774 struct htb_class *cl = (struct htb_class *)*arg, *parent;
d03b195b 1775 struct tc_htb_qopt_offload offload_opt;
1e90474c 1776 struct nlattr *opt = tca[TCA_OPTIONS];
6906f4ed 1777 struct nlattr *tb[TCA_HTB_MAX + 1];
4ce70b4a 1778 struct Qdisc *parent_qdisc = NULL;
d03b195b 1779 struct netdev_queue *dev_queue;
1da177e4 1780 struct tc_htb_opt *hopt;
df62cdf3 1781 u64 rate64, ceil64;
da01ec4e 1782 int warn = 0;
1783
1784 /* extract all subattrs from opt attr */
1785 if (!opt)
1786 goto failure;
1787
8cb08174 1788 err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
807cfded 1789 extack);
1790 if (err < 0)
1791 goto failure;
1792
1793 err = -EINVAL;
27a3421e 1794 if (tb[TCA_HTB_PARMS] == NULL)
1da177e4 1795 goto failure;
1da177e4 1796
1797 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1798
1e90474c 1799 hopt = nla_data(tb[TCA_HTB_PARMS]);
196d97f6 1800 if (!hopt->rate.rate || !hopt->ceil.rate)
87990467 1801 goto failure;
1da177e4 1802
1803 if (q->offload) {
1804 /* Options not supported by the offload. */
1805 if (hopt->rate.overhead || hopt->ceil.overhead) {
1806 NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
1807 goto failure;
1808 }
1809 if (hopt->rate.mpu || hopt->ceil.mpu) {
1810 NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
1811 goto failure;
1812 }
1813 if (hopt->quantum) {
1814 NL_SET_ERR_MSG(extack, "HTB offload doesn't support the quantum parameter");
1815 goto failure;
1816 }
1817 if (hopt->prio) {
1818 NL_SET_ERR_MSG(extack, "HTB offload doesn't support the prio parameter");
1819 goto failure;
1820 }
1821 }
1822
 8a8e3d84 1823 /* Keep backward compatibility with rate_table-based iproute2 tc */
6b1dd856 1824 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1825 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1826 NULL));
1827
1828 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1829 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1830 NULL));
8a8e3d84 1831
1832 rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1833 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1834
87990467 1835 if (!cl) { /* new class */
1836 struct net_device *dev = qdisc_dev(sch);
1837 struct Qdisc *new_q, *old_q;
3696f625 1838 int prio;
ee39e10c 1839 struct {
1e90474c 1840 struct nlattr nla;
1841 struct gnet_estimator opt;
1842 } est = {
1843 .nla = {
1844 .nla_len = nla_attr_size(sizeof(est.opt)),
1845 .nla_type = TCA_RATE,
1846 },
1847 .opt = {
1848 /* 4s interval, 16s averaging constant */
1849 .interval = 2,
1850 .ewma_log = 2,
1851 },
1852 };
3696f625 1853
1da177e4 1854 /* check for valid classid */
1855 if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1856 htb_find(classid, sch))
1857 goto failure;
1858
1859 /* check maximal depth */
1860 if (parent && parent->parent && parent->parent->level < 2) {
807cfded 1861 NL_SET_ERR_MSG_MOD(extack, "tree is too deep");
1862 goto failure;
1863 }
1864 err = -ENOBUFS;
1865 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1866 if (!cl)
1da177e4 1867 goto failure;
87990467 1868
1869 gnet_stats_basic_sync_init(&cl->bstats);
1870 gnet_stats_basic_sync_init(&cl->bstats_bias);
67c9e627 1871
8d1a77f9 1872 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1873 if (err) {
1874 kfree(cl);
1875 goto failure;
1876 }
64153ce0 1877 if (htb_rate_est || tca[TCA_RATE]) {
1878 err = gen_new_estimator(&cl->bstats, NULL,
1879 &cl->rate_est,
edb09eb1 1880 NULL,
29cbcd85 1881 true,
64153ce0 1882 tca[TCA_RATE] ? : &est.nla);
1883 if (err)
1884 goto err_block_put;
1885 }
1886
42077599 1887 cl->children = 0;
1888 RB_CLEAR_NODE(&cl->pq_node);
1889
1890 for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1891 RB_CLEAR_NODE(&cl->node[prio]);
1da177e4 1892
1893 cl->common.classid = classid;
1894
 1895 /* Make sure nothing interrupts us between the two
1896 * ndo_setup_tc calls.
1897 */
1898 ASSERT_RTNL();
1899
1da177e4 1900 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
 1901 * so it can't be created inside of sch_tree_lock
1902 * -- thanks to Karlis Peisenieks
1903 */
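 /* Pick the TX queue for the new leaf: without offload use the qdisc's own
 * dev_queue; with offload either ask the driver for a queue
 * (TC_HTB_LEAF_ALLOC_QUEUE) or, when the parent is currently a leaf,
 * convert it to an inner node (TC_HTB_LEAF_TO_INNER) and take over its
 * queue as the first child.
 */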
1904 if (!q->offload) {
1905 dev_queue = sch->dev_queue;
1906 } else if (!(parent && !parent->level)) {
1907 /* Assign a dev_queue to this classid. */
1908 offload_opt = (struct tc_htb_qopt_offload) {
1909 .command = TC_HTB_LEAF_ALLOC_QUEUE,
1910 .classid = cl->common.classid,
1911 .parent_classid = parent ?
1912 TC_H_MIN(parent->common.classid) :
1913 TC_HTB_CLASSID_ROOT,
1914 .rate = max_t(u64, hopt->rate.rate, rate64),
1915 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1916 .extack = extack,
1917 };
1918 err = htb_offload(dev, &offload_opt);
1919 if (err) {
1920 NL_SET_ERR_MSG_WEAK(extack,
1921 "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
1922 goto err_kill_estimator;
1923 }
1924 dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
1925 } else { /* First child. */
ca49bfd9 1926 dev_queue = htb_offload_get_queue(parent);
1927 old_q = htb_graft_helper(dev_queue, NULL);
1928 WARN_ON(old_q != parent->leaf.q);
1929 offload_opt = (struct tc_htb_qopt_offload) {
1930 .command = TC_HTB_LEAF_TO_INNER,
1931 .classid = cl->common.classid,
1932 .parent_classid =
1933 TC_H_MIN(parent->common.classid),
1934 .rate = max_t(u64, hopt->rate.rate, rate64),
1935 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1936 .extack = extack,
1937 };
1938 err = htb_offload(dev, &offload_opt);
1939 if (err) {
1940 NL_SET_ERR_MSG_WEAK(extack,
1941 "Failed to offload TC_HTB_LEAF_TO_INNER");
1942 htb_graft_helper(dev_queue, old_q);
1943 goto err_kill_estimator;
1944 }
f56940da 1945 _bstats_update(&parent->bstats_bias,
1946 u64_stats_read(&old_q->bstats.bytes),
1947 u64_stats_read(&old_q->bstats.packets));
1948 qdisc_put(old_q);
1949 }
1950 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
a38a9882 1951 classid, NULL);
1952 if (q->offload) {
1953 if (new_q) {
1954 htb_set_lockdep_class_child(new_q);
1955 /* One ref for cl->leaf.q, the other for
1956 * dev_queue->qdisc.
1957 */
1958 qdisc_refcount_inc(new_q);
1959 }
1960 old_q = htb_graft_helper(dev_queue, new_q);
1961 /* No qdisc_put needed. */
1962 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1963 }
1964 sch_tree_lock(sch);
1965 if (parent && !parent->level) {
1966 /* turn parent into inner node */
e5f0e8f8 1967 qdisc_purge_queue(parent->leaf.q);
4ce70b4a 1968 parent_qdisc = parent->leaf.q;
1969 if (parent->prio_activity)
1970 htb_deactivate(q, parent);
1971
1972 /* remove from evt list because of level change */
1973 if (parent->cmode != HTB_CAN_SEND) {
c9364636 1974 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1975 parent->cmode = HTB_CAN_SEND;
1976 }
1977 parent->level = (parent->parent ? parent->parent->level
87990467 1978 : TC_HTB_MAXDEPTH) - 1;
11957be2 1979 memset(&parent->inner, 0, sizeof(parent->inner));
1da177e4 1980 }
d03b195b 1981
 1da177e4 1982 /* the leaf (this class) needs an elementary qdisc */
11957be2 1983 cl->leaf.q = new_q ? new_q : &noop_qdisc;
1984 if (q->offload)
1985 cl->leaf.offload_queue = dev_queue;
1da177e4 1986
87990467 1987 cl->parent = parent;
1988
1989 /* set class to be in HTB_CAN_SEND state */
1990 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1991 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
5343a7f8 1992 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
d2de875c 1993 cl->t_c = ktime_get_ns();
1994 cl->cmode = HTB_CAN_SEND;
1995
1996 /* attach to the hash list and parent's family */
f4c1f3e0 1997 qdisc_class_hash_insert(&q->clhash, &cl->common);
1998 if (parent)
1999 parent->children++;
2000 if (cl->leaf.q != &noop_qdisc)
2001 qdisc_hash_add(cl->leaf.q, true);
ee39e10c 2002 } else {
71bcb09a 2003 if (tca[TCA_RATE]) {
2004 err = gen_replace_estimator(&cl->bstats, NULL,
2005 &cl->rate_est,
edb09eb1 2006 NULL,
29cbcd85 2007 true,
2008 tca[TCA_RATE]);
2009 if (err)
2010 return err;
2011 }
1da177e4 2012
2013 if (q->offload) {
2014 struct net_device *dev = qdisc_dev(sch);
2015
2016 offload_opt = (struct tc_htb_qopt_offload) {
2017 .command = TC_HTB_NODE_MODIFY,
2018 .classid = cl->common.classid,
2019 .rate = max_t(u64, hopt->rate.rate, rate64),
2020 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
2021 .extack = extack,
2022 };
2023 err = htb_offload(dev, &offload_opt);
2024 if (err)
2025 /* Estimator was replaced, and rollback may fail
2026 * as well, so we don't try to recover it, and
 2027 * the estimator won't work properly with the
2028 * offload anyway, because bstats are updated
2029 * only when the stats are queried.
2030 */
2031 return err;
2032 }
1598f7cb 2033
2034 sch_tree_lock(sch);
2035 }
2036
2037 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
2038 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
2039
 1da177e4 2040 /* there used to be a nasty bug here: we have to check that the node
 11957be2 2041 * is really a leaf before changing cl->leaf!
cc7ec456 2042 */
1da177e4 2043 if (!cl->level) {
2044 u64 quantum = cl->rate.rate_bytes_ps;
2045
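 /* Default quantum is rate / r2q, clamped below to 1000 and above to
 * 200000 bytes unless an explicit quantum was given.
 */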
2046 do_div(quantum, q->rate2quantum);
2047 cl->quantum = min_t(u64, quantum, INT_MAX);
2048
c19f7a34 2049 if (!hopt->quantum && cl->quantum < 1000) {
da01ec4e 2050 warn = -1;
c19f7a34 2051 cl->quantum = 1000;
1da177e4 2052 }
c19f7a34 2053 if (!hopt->quantum && cl->quantum > 200000) {
da01ec4e 2054 warn = 1;
c19f7a34 2055 cl->quantum = 200000;
2056 }
2057 if (hopt->quantum)
2058 cl->quantum = hopt->quantum;
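 /* Clamp an out-of-range priority to the highest valid index. */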
2059 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
2060 cl->prio = TC_HTB_NUMPRIO - 1;
2061 }
2062
324f5aa5 2063 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
f3ad857e 2064 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
56b765b7 2065
1da177e4 2066 sch_tree_unlock(sch);
4ce70b4a 2067 qdisc_put(parent_qdisc);
1da177e4 2068
da01ec4e 2069 if (warn)
2070 NL_SET_ERR_MSG_FMT_MOD(extack,
2071 "quantum of class %X is %s. Consider r2q change.",
2072 cl->common.classid, (warn == -1 ? "small" : "big"));
da01ec4e 2073
2074 qdisc_class_hash_grow(sch, &q->clhash);
2075
2076 *arg = (unsigned long)cl;
2077 return 0;
2078
2079err_kill_estimator:
2080 gen_kill_estimator(&cl->rate_est);
2081err_block_put:
2082 tcf_block_put(cl->block);
2083 kfree(cl);
1da177e4 2084failure:
2085 return err;
2086}
2087
2088static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
2089 struct netlink_ext_ack *extack)
2090{
2091 struct htb_sched *q = qdisc_priv(sch);
2092 struct htb_class *cl = (struct htb_class *)arg;
3bf72957 2093
6529eaba 2094 return cl ? cl->block : q->block;
2095}
2096
2097static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
87990467 2098 u32 classid)
1da177e4 2099{
87990467 2100 struct htb_class *cl = htb_find(classid, sch);
3bf72957 2101
1da177e4 2102 /*if (cl && !cl->level) return 0;
2103 * The line above used to be there to prevent attaching filters to
 2104 * leaves. But at least the tc_index filter uses this just to get the class
 2105 * for other reasons, so we have to allow it.
2106 * ----
2107 * 19.6.2002 As Werner explained it is ok - bind filter is just
2108 * another way to "lock" the class - unlike "get" this lock can
2109 * be broken by class during destroy IIUC.
1da177e4 2110 */
2111 if (cl)
2112 cl->filter_cnt++;
2113 return (unsigned long)cl;
2114}
2115
2116static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
2117{
1da177e4 2118 struct htb_class *cl = (struct htb_class *)arg;
3bf72957 2119
2120 if (cl)
2121 cl->filter_cnt--;
2122}
2123
2124static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2125{
2126 struct htb_sched *q = qdisc_priv(sch);
f4c1f3e0 2127 struct htb_class *cl;
f4c1f3e0 2128 unsigned int i;
2129
2130 if (arg->stop)
2131 return;
2132
f4c1f3e0 2133 for (i = 0; i < q->clhash.hashsize; i++) {
b67bfe0d 2134 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
e046fa89 2135 if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
1da177e4 2136 return;
2137 }
2138 }
2139}
2140
20fea08b 2141static const struct Qdisc_class_ops htb_class_ops = {
d03b195b 2142 .select_queue = htb_select_queue,
2143 .graft = htb_graft,
2144 .leaf = htb_leaf,
256d61b8 2145 .qlen_notify = htb_qlen_notify,
143976ce 2146 .find = htb_search,
2147 .change = htb_change_class,
2148 .delete = htb_delete,
2149 .walk = htb_walk,
6529eaba 2150 .tcf_block = htb_tcf_block,
2151 .bind_tcf = htb_bind_filter,
2152 .unbind_tcf = htb_unbind_filter,
2153 .dump = htb_dump_class,
2154 .dump_stats = htb_dump_class_stats,
2155};
2156
20fea08b 2157static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
2158 .cl_ops = &htb_class_ops,
2159 .id = "htb",
2160 .priv_size = sizeof(struct htb_sched),
2161 .enqueue = htb_enqueue,
2162 .dequeue = htb_dequeue,
77be155c 2163 .peek = qdisc_peek_dequeued,
1da177e4 2164 .init = htb_init,
d03b195b 2165 .attach = htb_attach,
2166 .reset = htb_reset,
2167 .destroy = htb_destroy,
2168 .dump = htb_dump,
2169 .owner = THIS_MODULE,
2170};
2171
2172static int __init htb_module_init(void)
2173{
87990467 2174 return register_qdisc(&htb_qdisc_ops);
1da177e4 2175}
87990467 2176static void __exit htb_module_exit(void)
1da177e4 2177{
87990467 2178 unregister_qdisc(&htb_qdisc_ops);
1da177e4 2179}
87990467 2180
2181module_init(htb_module_init)
2182module_exit(htb_module_exit)
2183MODULE_LICENSE("GPL");